VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h@76553

Last change on this file since 76553 was 76553, checked in by vboxsync, 6 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 337.8 KB
 
1/* $Id: IEMAllCImplVmxInstr.cpp.h 76553 2019-01-01 01:45:53Z vboxsync $ */
2/** @file
3 * IEM - VT-x instruction implementation.
4 */
5
6/*
7 * Copyright (C) 2011-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Defined Constants And Macros *
21*********************************************************************************************************************************/
22#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
23/**
24 * Gets the ModR/M, SIB and displacement byte(s) from decoded opcodes given their
25 * relative offsets.
26 */
27# ifdef IEM_WITH_CODE_TLB
28# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) do { } while (0)
29# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) do { } while (0)
30# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
31# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
32# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
33# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
34# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
35# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
36# error "Implement me: Getting ModR/M, SIB, displacement needs to work even when instruction crosses a page boundary."
37# else /* !IEM_WITH_CODE_TLB */
38# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) \
39 do \
40 { \
41 Assert((a_offModRm) < (a_pVCpu)->iem.s.cbOpcode); \
42 (a_bModRm) = (a_pVCpu)->iem.s.abOpcode[(a_offModRm)]; \
43 } while (0)
44
45# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) IEM_MODRM_GET_U8(a_pVCpu, a_bSib, a_offSib)
46
47# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) \
48 do \
49 { \
50 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
51 uint8_t const bTmpLo = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
52 uint8_t const bTmpHi = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
53 (a_u16Disp) = RT_MAKE_U16(bTmpLo, bTmpHi); \
54 } while (0)
55
56# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) \
57 do \
58 { \
59 Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
60 (a_u16Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
61 } while (0)
62
63# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) \
64 do \
65 { \
66 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
67 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
68 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
69 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
70 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
71 (a_u32Disp) = RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
72 } while (0)
73
74# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) \
75 do \
76 { \
77 Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
78 (a_u32Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
79 } while (0)
80
81# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
82 do \
83 { \
84 Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
85 (a_u64Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
86 } while (0)
87
88# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
89 do \
90 { \
91 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
92 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
93 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
94 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
95 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
96 (a_u64Disp) = (int32_t)RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
97 } while (0)
98# endif /* !IEM_WITH_CODE_TLB */
99
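/*
 * Illustrative sketch (editorial addition, not part of the original source): a VMX
 * instruction helper can re-fetch already-decoded bytes through the macros above. The
 * offset below (offModRm) is hypothetical; real callers pass the offsets recorded by
 * the instruction decoder.
 *
 *     uint8_t  bModRm;
 *     uint16_t u16Disp;
 *     IEM_MODRM_GET_U8(pVCpu, bModRm, offModRm);        // asserts offModRm < cbOpcode
 *     IEM_DISP_GET_U16(pVCpu, u16Disp, offModRm + 1);   // little-endian 16-bit displacement
 */
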
100/** Gets the guest-physical address of the shadow VMCS for the given VCPU. */
101# define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs)
102
103/** Whether a shadow VMCS is present for the given VCPU. */
104# define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)
105
106/** Gets the VMXON region pointer. */
107# define IEM_VMX_GET_VMXON_PTR(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
108
109/** Gets the guest-physical address of the current VMCS for the given VCPU. */
110# define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)
111
112/** Whether a current VMCS is present for the given VCPU. */
113# define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)
114
115/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
116# define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
117 do \
118 { \
119 Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
120 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
121 } while (0)
122
123/** Clears any current VMCS for the given VCPU. */
124# define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
125 do \
126 { \
127 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
128 } while (0)
129
130/** Checks that the guest is in VMX operation (raising \#UD otherwise) for VMX instructions that require it.
131 * @note Any changes here, check if IEMOP_HLP_IN_VMX_OPERATION needs updating. */
132# define IEM_VMX_IN_VMX_OPERATION(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
133 do \
134 { \
135 if (IEM_VMX_IS_ROOT_MODE(a_pVCpu)) \
136 { /* likely */ } \
137 else \
138 { \
139 Log((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
140 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
141 return iemRaiseUndefinedOpcode(a_pVCpu); \
142 } \
143 } while (0)
144
145/** Marks a VM-entry failure with a diagnostic reason, logs and returns. */
146# define IEM_VMX_VMENTRY_FAILED_RET(a_pVCpu, a_pszInstr, a_pszFailure, a_VmxDiag) \
147 do \
148 { \
149 Log(("%s: VM-entry failed! enmDiag=%u (%s) -> %s\n", (a_pszInstr), (a_VmxDiag), \
150 HMVmxGetDiagDesc(a_VmxDiag), (a_pszFailure))); \
151 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
152 return VERR_VMX_VMENTRY_FAILED; \
153 } while (0)
154
155/** Marks a VM-exit failure with a diagnostic reason, logs and returns. */
156# define IEM_VMX_VMEXIT_FAILED_RET(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag) \
157 do \
158 { \
159 Log(("VM-exit failed! uExitReason=%u enmDiag=%u (%s) -> %s\n", (a_uExitReason), (a_VmxDiag), \
160 HMVmxGetDiagDesc(a_VmxDiag), (a_pszFailure))); \
161 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
162 return VERR_VMX_VMEXIT_FAILED; \
163 } while (0)
164
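/*
 * Illustrative sketch (editorial addition): the VM-entry/VM-exit checks later in this
 * file funnel failures through the macros above together with a VMXVDIAG value, e.g.
 * one returned by the iemVmxGetDiagVmentry* helpers further down. pszInstr, pszFailure
 * and the condition are hypothetical locals of the calling check:
 *
 *     if (fGuestSegBaseCheckFailed)
 *         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure,
 *                                    iemVmxGetDiagVmentrySegBase(iSegReg));
 */
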
165/** Enables/disables IEM-only EM execution policy in and from ring-3. */
166# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
167# define IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET(a_pVCpu, a_pszLogPrefix) \
168 do { \
169 Log(("%s: Enabling IEM-only EM execution policy!\n", (a_pszLogPrefix))); \
170 return EMR3SetExecutionPolicy((a_pVCpu)->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true); \
171 } while (0)
172
173# define IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE(a_pVCpu, a_pszLogPrefix) \
174 do { \
175 Log(("%s: Disabling IEM-only EM execution policy!\n", (a_pszLogPrefix))); \
176 EMR3SetExecutionPolicy((a_pVCpu)->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false); \
177 } while (0)
178# else
179# define IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET(a_pVCpu, a_pszLogPrefix) do { return VINF_SUCCESS; } while (0)
180# define IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE(a_pVCpu, a_pszLogPrefix) do { } while (0)
181# endif
182
183
184/*********************************************************************************************************************************
185* Global Variables *
186*********************************************************************************************************************************/
187/** @todo NSTVMX: The following VM-exit intercepts are pending:
188 * VMX_EXIT_IO_SMI
189 * VMX_EXIT_SMI
190 * VMX_EXIT_INT_WINDOW
191 * VMX_EXIT_NMI_WINDOW
192 * VMX_EXIT_GETSEC
193 * VMX_EXIT_RSM
194 * VMX_EXIT_MTF
195 * VMX_EXIT_MONITOR (APIC access VM-exit caused by MONITOR pending)
196 * VMX_EXIT_ERR_MACHINE_CHECK
197 * VMX_EXIT_TPR_BELOW_THRESHOLD
198 * VMX_EXIT_APIC_ACCESS
199 * VMX_EXIT_VIRTUALIZED_EOI
200 * VMX_EXIT_EPT_VIOLATION
201 * VMX_EXIT_EPT_MISCONFIG
202 * VMX_EXIT_INVEPT
203 * VMX_EXIT_PREEMPT_TIMER
204 * VMX_EXIT_INVVPID
205 * VMX_EXIT_APIC_WRITE
206 * VMX_EXIT_RDRAND
207 * VMX_EXIT_VMFUNC
208 * VMX_EXIT_ENCLS
209 * VMX_EXIT_RDSEED
210 * VMX_EXIT_PML_FULL
211 * VMX_EXIT_XSAVES
212 * VMX_EXIT_XRSTORS
213 */
214/**
215 * Map of VMCS field encodings to their virtual-VMCS structure offsets.
216 *
217 * The first array dimension is the VMCS field width (shifted left by 2) OR'ed with the
218 * field type, and the second dimension is the field index, see VMXVMCSFIELDENC.
219 */
220uint16_t const g_aoffVmcsMap[16][VMX_V_VMCS_MAX_INDEX + 1] =
221{
222 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
223 {
224 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u16Vpid),
225 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u16PostIntNotifyVector),
226 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u16EptpIndex),
227 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
228 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
229 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
230 },
231 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
232 {
233 /* 0-7 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
234 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
235 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
236 /* 24-25 */ UINT16_MAX, UINT16_MAX
237 },
238 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
239 {
240 /* 0 */ RT_UOFFSETOF(VMXVVMCS, GuestEs),
241 /* 1 */ RT_UOFFSETOF(VMXVVMCS, GuestCs),
242 /* 2 */ RT_UOFFSETOF(VMXVVMCS, GuestSs),
243 /* 3 */ RT_UOFFSETOF(VMXVVMCS, GuestDs),
244 /* 4 */ RT_UOFFSETOF(VMXVVMCS, GuestFs),
245 /* 5 */ RT_UOFFSETOF(VMXVVMCS, GuestGs),
246 /* 6 */ RT_UOFFSETOF(VMXVVMCS, GuestLdtr),
247 /* 7 */ RT_UOFFSETOF(VMXVVMCS, GuestTr),
248 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u16GuestIntStatus),
249 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u16PmlIndex),
250 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
251 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
252 },
253 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
254 {
255 /* 0 */ RT_UOFFSETOF(VMXVVMCS, HostEs),
256 /* 1 */ RT_UOFFSETOF(VMXVVMCS, HostCs),
257 /* 2 */ RT_UOFFSETOF(VMXVVMCS, HostSs),
258 /* 3 */ RT_UOFFSETOF(VMXVVMCS, HostDs),
259 /* 4 */ RT_UOFFSETOF(VMXVVMCS, HostFs),
260 /* 5 */ RT_UOFFSETOF(VMXVVMCS, HostGs),
261 /* 6 */ RT_UOFFSETOF(VMXVVMCS, HostTr),
262 /* 7-14 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
263 /* 15-22 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
264 /* 23-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX
265 },
266 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
267 {
268 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64AddrIoBitmapA),
269 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64AddrIoBitmapB),
270 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64AddrMsrBitmap),
271 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64AddrExitMsrStore),
272 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64AddrExitMsrLoad),
273 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64AddrEntryMsrLoad),
274 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64ExecVmcsPtr),
275 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64AddrPml),
276 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64TscOffset),
277 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64AddrVirtApic),
278 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u64AddrApicAccess),
279 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u64AddrPostedIntDesc),
280 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u64VmFuncCtls),
281 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u64EptpPtr),
282 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap0),
283 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap1),
284 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap2),
285 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap3),
286 /* 18 */ RT_UOFFSETOF(VMXVVMCS, u64AddrEptpList),
287 /* 19 */ RT_UOFFSETOF(VMXVVMCS, u64AddrVmreadBitmap),
288 /* 20 */ RT_UOFFSETOF(VMXVVMCS, u64AddrVmwriteBitmap),
289 /* 21 */ RT_UOFFSETOF(VMXVVMCS, u64AddrXcptVeInfo),
290 /* 22 */ RT_UOFFSETOF(VMXVVMCS, u64XssBitmap),
291 /* 23 */ RT_UOFFSETOF(VMXVVMCS, u64AddrEnclsBitmap),
292 /* 24 */ UINT16_MAX,
293 /* 25 */ RT_UOFFSETOF(VMXVVMCS, u64TscMultiplier)
294 },
295 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
296 {
297 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64RoGuestPhysAddr),
298 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
299 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
300 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
301 /* 25 */ UINT16_MAX
302 },
303 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
304 {
305 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64VmcsLinkPtr),
306 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64GuestDebugCtlMsr),
307 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPatMsr),
308 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64GuestEferMsr),
309 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPerfGlobalCtlMsr),
310 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte0),
311 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte1),
312 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte2),
313 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte3),
314 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64GuestBndcfgsMsr),
315 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
316 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
317 },
318 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
319 {
320 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64HostPatMsr),
321 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64HostEferMsr),
322 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64HostPerfGlobalCtlMsr),
323 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
324 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
325 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
326 },
327 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
328 {
329 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32PinCtls),
330 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u32ProcCtls),
331 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u32XcptBitmap),
332 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u32XcptPFMask),
333 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u32XcptPFMatch),
334 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u32Cr3TargetCount),
335 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u32ExitCtls),
336 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u32ExitMsrStoreCount),
337 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u32ExitMsrLoadCount),
338 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u32EntryCtls),
339 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u32EntryMsrLoadCount),
340 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u32EntryIntInfo),
341 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u32EntryXcptErrCode),
342 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u32EntryInstrLen),
343 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u32TprThreshold),
344 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u32ProcCtls2),
345 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u32PleGap),
346 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u32PleWindow),
347 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
348 },
349 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
350 {
351 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32RoVmInstrError),
352 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitReason),
353 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitIntInfo),
354 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitIntErrCode),
355 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u32RoIdtVectoringInfo),
356 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u32RoIdtVectoringErrCode),
357 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitInstrLen),
358 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitInstrInfo),
359 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
360 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
361 /* 24-25 */ UINT16_MAX, UINT16_MAX
362 },
363 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
364 {
365 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32GuestEsLimit),
366 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u32GuestCsLimit),
367 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSsLimit),
368 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u32GuestDsLimit),
369 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u32GuestFsLimit),
370 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u32GuestGsLimit),
371 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u32GuestLdtrLimit),
372 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u32GuestTrLimit),
373 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u32GuestGdtrLimit),
374 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u32GuestIdtrLimit),
375 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u32GuestEsAttr),
376 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u32GuestCsAttr),
377 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSsAttr),
378 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u32GuestDsAttr),
379 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u32GuestFsAttr),
380 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u32GuestGsAttr),
381 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u32GuestLdtrAttr),
382 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u32GuestTrAttr),
383 /* 18 */ RT_UOFFSETOF(VMXVVMCS, u32GuestIntrState),
384 /* 19 */ RT_UOFFSETOF(VMXVVMCS, u32GuestActivityState),
385 /* 20 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSmBase),
386 /* 21 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSysenterCS),
387 /* 22 */ UINT16_MAX,
388 /* 23 */ RT_UOFFSETOF(VMXVVMCS, u32PreemptTimer),
389 /* 24-25 */ UINT16_MAX, UINT16_MAX
390 },
391 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
392 {
393 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32HostSysenterCs),
394 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
395 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
396 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
397 /* 25 */ UINT16_MAX
398 },
399 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_CONTROL: */
400 {
401 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64Cr0Mask),
402 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64Cr4Mask),
403 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64Cr0ReadShadow),
404 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64Cr4ReadShadow),
405 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target0),
406 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target1),
407 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target2),
408 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target3),
409 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
410 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
411 /* 24-25 */ UINT16_MAX, UINT16_MAX
412 },
413 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
414 {
415 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64RoExitQual),
416 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRcx),
417 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRsi),
418 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRdi),
419 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRip),
420 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64RoGuestLinearAddr),
421 /* 6-13 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
422 /* 14-21 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
423 /* 22-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
424 },
425 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
426 {
427 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCr0),
428 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCr3),
429 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCr4),
430 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64GuestEsBase),
431 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCsBase),
432 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSsBase),
433 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64GuestDsBase),
434 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64GuestFsBase),
435 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64GuestGsBase),
436 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64GuestLdtrBase),
437 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u64GuestTrBase),
438 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u64GuestGdtrBase),
439 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u64GuestIdtrBase),
440 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u64GuestDr7),
441 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRsp),
442 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRip),
443 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRFlags),
444 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPendingDbgXcpt),
445 /* 18 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSysenterEsp),
446 /* 19 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSysenterEip),
447 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
448 },
449 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_HOST_STATE: */
450 {
451 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64HostCr0),
452 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64HostCr3),
453 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64HostCr4),
454 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64HostFsBase),
455 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64HostGsBase),
456 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64HostTrBase),
457 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64HostGdtrBase),
458 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64HostIdtrBase),
459 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64HostSysenterEsp),
460 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64HostSysenterEip),
461 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u64HostRsp),
462 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u64HostRip),
463 /* 12-19 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
464 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
465 }
466};
467
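/*
 * Illustrative sketch (editorial addition): how a VMCS field encoding is turned into a
 * virtual-VMCS offset via g_aoffVmcsMap. The helpers below hard-code the width/type; a
 * generic lookup (as used by the VMREAD/VMWRITE paths) would extract them from the
 * encoding itself, assuming the companion VMX_BF_VMCS_ENC_WIDTH/TYPE bitfield
 * definitions:
 *
 *     uint8_t  const uWidth     = RT_BF_GET(uFieldEnc, VMX_BF_VMCS_ENC_WIDTH);
 *     uint8_t  const uType      = RT_BF_GET(uFieldEnc, VMX_BF_VMCS_ENC_TYPE);
 *     uint8_t  const uWidthType = (uWidth << 2) | uType;
 *     uint8_t  const uIndex     = RT_BF_GET(uFieldEnc, VMX_BF_VMCS_ENC_INDEX);
 *     uint16_t const offField   = g_aoffVmcsMap[uWidthType][uIndex];  // UINT16_MAX => unmapped field
 */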
468
469/**
470 * Returns whether the given VMCS field is valid and supported by our emulation.
471 *
472 * @param pVCpu The cross context virtual CPU structure.
473 * @param u64FieldEnc The VMCS field encoding.
474 *
475 * @remarks This takes into account the CPU features exposed to the guest.
476 */
477IEM_STATIC bool iemVmxIsVmcsFieldValid(PVMCPU pVCpu, uint64_t u64FieldEnc)
478{
479 uint32_t const uFieldEncHi = RT_HI_U32(u64FieldEnc);
480 uint32_t const uFieldEncLo = RT_LO_U32(u64FieldEnc);
481 if (!uFieldEncHi)
482 { /* likely */ }
483 else
484 return false;
485
486 PCCPUMFEATURES pFeat = IEM_GET_GUEST_CPU_FEATURES(pVCpu);
487 switch (uFieldEncLo)
488 {
489 /*
490 * 16-bit fields.
491 */
492 /* Control fields. */
493 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
494 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
495 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
496
497 /* Guest-state fields. */
498 case VMX_VMCS16_GUEST_ES_SEL:
499 case VMX_VMCS16_GUEST_CS_SEL:
500 case VMX_VMCS16_GUEST_SS_SEL:
501 case VMX_VMCS16_GUEST_DS_SEL:
502 case VMX_VMCS16_GUEST_FS_SEL:
503 case VMX_VMCS16_GUEST_GS_SEL:
504 case VMX_VMCS16_GUEST_LDTR_SEL:
505 case VMX_VMCS16_GUEST_TR_SEL:
506 case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
507 case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
508
509 /* Host-state fields. */
510 case VMX_VMCS16_HOST_ES_SEL:
511 case VMX_VMCS16_HOST_CS_SEL:
512 case VMX_VMCS16_HOST_SS_SEL:
513 case VMX_VMCS16_HOST_DS_SEL:
514 case VMX_VMCS16_HOST_FS_SEL:
515 case VMX_VMCS16_HOST_GS_SEL:
516 case VMX_VMCS16_HOST_TR_SEL: return true;
517
518 /*
519 * 64-bit fields.
520 */
521 /* Control fields. */
522 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
523 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
524 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
525 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
526 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
527 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
528 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
529 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
530 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
531 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
532 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
533 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
534 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
535 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
536 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
537 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
538 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
539 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
540 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
541 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
542 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
543 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
544 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
545 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
546 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
547 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
548 case VMX_VMCS64_CTRL_EPTP_FULL:
549 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
550 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
551 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
552 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
553 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
554 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
555 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
556 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
557 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
558 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
559 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
560 {
561 uint64_t const uVmFuncMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64VmFunc;
562 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
563 }
564 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
565 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
566 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
567 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
568 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_FULL:
569 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
570 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
571 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
572 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL:
573 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH: return false;
574 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
575 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
576
577 /* Read-only data fields. */
578 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
579 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
580
581 /* Guest-state fields. */
582 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
583 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
584 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
585 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
586 case VMX_VMCS64_GUEST_PAT_FULL:
587 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
588 case VMX_VMCS64_GUEST_EFER_FULL:
589 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
590 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
591 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH: return false;
592 case VMX_VMCS64_GUEST_PDPTE0_FULL:
593 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
594 case VMX_VMCS64_GUEST_PDPTE1_FULL:
595 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
596 case VMX_VMCS64_GUEST_PDPTE2_FULL:
597 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
598 case VMX_VMCS64_GUEST_PDPTE3_FULL:
599 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
600 case VMX_VMCS64_GUEST_BNDCFGS_FULL:
601 case VMX_VMCS64_GUEST_BNDCFGS_HIGH: return false;
602
603 /* Host-state fields. */
604 case VMX_VMCS64_HOST_PAT_FULL:
605 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
606 case VMX_VMCS64_HOST_EFER_FULL:
607 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
608 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
609 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH: return false;
610
611 /*
612 * 32-bit fields.
613 */
614 /* Control fields. */
615 case VMX_VMCS32_CTRL_PIN_EXEC:
616 case VMX_VMCS32_CTRL_PROC_EXEC:
617 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
618 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
619 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
620 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
621 case VMX_VMCS32_CTRL_EXIT:
622 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
623 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
624 case VMX_VMCS32_CTRL_ENTRY:
625 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
626 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
627 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
628 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
629 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
630 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
631 case VMX_VMCS32_CTRL_PLE_GAP:
632 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
633
634 /* Read-only data fields. */
635 case VMX_VMCS32_RO_VM_INSTR_ERROR:
636 case VMX_VMCS32_RO_EXIT_REASON:
637 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
638 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
639 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
640 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
641 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
642 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
643
644 /* Guest-state fields. */
645 case VMX_VMCS32_GUEST_ES_LIMIT:
646 case VMX_VMCS32_GUEST_CS_LIMIT:
647 case VMX_VMCS32_GUEST_SS_LIMIT:
648 case VMX_VMCS32_GUEST_DS_LIMIT:
649 case VMX_VMCS32_GUEST_FS_LIMIT:
650 case VMX_VMCS32_GUEST_GS_LIMIT:
651 case VMX_VMCS32_GUEST_LDTR_LIMIT:
652 case VMX_VMCS32_GUEST_TR_LIMIT:
653 case VMX_VMCS32_GUEST_GDTR_LIMIT:
654 case VMX_VMCS32_GUEST_IDTR_LIMIT:
655 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
656 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
657 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
658 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
659 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
660 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
661 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
662 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
663 case VMX_VMCS32_GUEST_INT_STATE:
664 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
665 case VMX_VMCS32_GUEST_SMBASE:
666 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
667 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
668
669 /* Host-state fields. */
670 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
671
672 /*
673 * Natural-width fields.
674 */
675 /* Control fields. */
676 case VMX_VMCS_CTRL_CR0_MASK:
677 case VMX_VMCS_CTRL_CR4_MASK:
678 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
679 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
680 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
681 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
682 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
683 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
684
685 /* Read-only data fields. */
686 case VMX_VMCS_RO_EXIT_QUALIFICATION:
687 case VMX_VMCS_RO_IO_RCX:
688 case VMX_VMCS_RO_IO_RSX:
689 case VMX_VMCS_RO_IO_RDI:
690 case VMX_VMCS_RO_IO_RIP:
691 case VMX_VMCS_RO_GUEST_LINEAR_ADDR: return true;
692
693 /* Guest-state fields. */
694 case VMX_VMCS_GUEST_CR0:
695 case VMX_VMCS_GUEST_CR3:
696 case VMX_VMCS_GUEST_CR4:
697 case VMX_VMCS_GUEST_ES_BASE:
698 case VMX_VMCS_GUEST_CS_BASE:
699 case VMX_VMCS_GUEST_SS_BASE:
700 case VMX_VMCS_GUEST_DS_BASE:
701 case VMX_VMCS_GUEST_FS_BASE:
702 case VMX_VMCS_GUEST_GS_BASE:
703 case VMX_VMCS_GUEST_LDTR_BASE:
704 case VMX_VMCS_GUEST_TR_BASE:
705 case VMX_VMCS_GUEST_GDTR_BASE:
706 case VMX_VMCS_GUEST_IDTR_BASE:
707 case VMX_VMCS_GUEST_DR7:
708 case VMX_VMCS_GUEST_RSP:
709 case VMX_VMCS_GUEST_RIP:
710 case VMX_VMCS_GUEST_RFLAGS:
711 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
712 case VMX_VMCS_GUEST_SYSENTER_ESP:
713 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
714
715 /* Host-state fields. */
716 case VMX_VMCS_HOST_CR0:
717 case VMX_VMCS_HOST_CR3:
718 case VMX_VMCS_HOST_CR4:
719 case VMX_VMCS_HOST_FS_BASE:
720 case VMX_VMCS_HOST_GS_BASE:
721 case VMX_VMCS_HOST_TR_BASE:
722 case VMX_VMCS_HOST_GDTR_BASE:
723 case VMX_VMCS_HOST_IDTR_BASE:
724 case VMX_VMCS_HOST_SYSENTER_ESP:
725 case VMX_VMCS_HOST_SYSENTER_EIP:
726 case VMX_VMCS_HOST_RSP:
727 case VMX_VMCS_HOST_RIP: return true;
728 }
729
730 return false;
731}
732
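/*
 * Illustrative sketch (editorial addition): VMREAD/VMWRITE emulation is expected to
 * reject unsupported encodings before consulting g_aoffVmcsMap, along these lines
 * (the exact VMXINSTRERR enumerator name is assumed here, not taken from the source):
 *
 *     if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
 *     {
 *         iemVmxVmFail(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT);
 *         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
 *         return VINF_SUCCESS;
 *     }
 */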
733
734/**
735 * Gets a host selector from the VMCS.
736 *
737 * @param pVmcs Pointer to the virtual VMCS.
738 * @param iSegReg The index of the segment register (X86_SREG_XXX).
739 */
740DECLINLINE(RTSEL) iemVmxVmcsGetHostSelReg(PCVMXVVMCS pVmcs, uint8_t iSegReg)
741{
742 Assert(iSegReg < X86_SREG_COUNT);
743 RTSEL HostSel;
744 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_16BIT;
745 uint8_t const uType = VMX_VMCS_ENC_TYPE_HOST_STATE;
746 uint8_t const uWidthType = (uWidth << 2) | uType;
747 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_HOST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
748 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
749 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
750 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
751 uint8_t const *pbField = pbVmcs + offField;
752 HostSel = *(uint16_t *)pbField;
753 return HostSel;
754}
755
756
757/**
758 * Sets a guest segment register in the VMCS.
759 *
760 * @param pVmcs Pointer to the virtual VMCS.
761 * @param iSegReg The index of the segment register (X86_SREG_XXX).
762 * @param pSelReg Pointer to the segment register.
763 */
764IEM_STATIC void iemVmxVmcsSetGuestSegReg(PCVMXVVMCS pVmcs, uint8_t iSegReg, PCCPUMSELREG pSelReg)
765{
766 Assert(pSelReg);
767 Assert(iSegReg < X86_SREG_COUNT);
768
769 /* Selector. */
770 {
771 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_16BIT;
772 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
773 uint8_t const uWidthType = (uWidth << 2) | uType;
774 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
775 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
776 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
777 uint8_t *pbVmcs = (uint8_t *)pVmcs;
778 uint8_t *pbField = pbVmcs + offField;
779 *(uint16_t *)pbField = pSelReg->Sel;
780 }
781
782 /* Limit. */
783 {
784 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
785 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
786 uint8_t const uWidthType = (uWidth << 2) | uType;
787 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCS_ENC_INDEX);
788 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
789 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
790 uint8_t *pbVmcs = (uint8_t *)pVmcs;
791 uint8_t *pbField = pbVmcs + offField;
792 *(uint32_t *)pbField = pSelReg->u32Limit;
793 }
794
795 /* Base. */
796 {
797 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_NATURAL;
798 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
799 uint8_t const uWidthType = (uWidth << 2) | uType;
800 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCS_ENC_INDEX);
801 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
802 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
803 uint8_t *pbVmcs = (uint8_t *)pVmcs;
804 uint8_t *pbField = pbVmcs + offField;
805 *(uint64_t *)pbField = pSelReg->u64Base;
806 }
807
808 /* Attributes. */
809 {
810 uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT | X86DESCATTR_DPL | X86DESCATTR_P
811 | X86DESCATTR_AVL | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
812 | X86DESCATTR_UNUSABLE;
813 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
814 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
815 uint8_t const uWidthType = (uWidth << 2) | uType;
816 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCS_ENC_INDEX);
817 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
818 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
819 uint8_t *pbVmcs = (uint8_t *)pVmcs;
820 uint8_t *pbField = pbVmcs + offField;
821 *(uint32_t *)pbField = pSelReg->Attr.u & fValidAttrMask;
822 }
823}
824
825
826/**
827 * Gets a guest segment register from the VMCS.
828 *
829 * @returns VBox status code.
830 * @param pVmcs Pointer to the virtual VMCS.
831 * @param iSegReg The index of the segment register (X86_SREG_XXX).
832 * @param pSelReg Where to store the segment register (only updated when
833 * VINF_SUCCESS is returned).
834 *
835 * @remarks Warning! This does not validate the contents of the retrieved segment
836 * register.
837 */
838IEM_STATIC int iemVmxVmcsGetGuestSegReg(PCVMXVVMCS pVmcs, uint8_t iSegReg, PCPUMSELREG pSelReg)
839{
840 Assert(pSelReg);
841 Assert(iSegReg < X86_SREG_COUNT);
842
843 /* Selector. */
844 uint16_t u16Sel;
845 {
846 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_16BIT;
847 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
848 uint8_t const uWidthType = (uWidth << 2) | uType;
849 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
850 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
851 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
852 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
853 uint8_t const *pbField = pbVmcs + offField;
854 u16Sel = *(uint16_t *)pbField;
855 }
856
857 /* Limit. */
858 uint32_t u32Limit;
859 {
860 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
861 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
862 uint8_t const uWidthType = (uWidth << 2) | uType;
863 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCS_ENC_INDEX);
864 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
865 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
866 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
867 uint8_t const *pbField = pbVmcs + offField;
868 u32Limit = *(uint32_t *)pbField;
869 }
870
871 /* Base. */
872 uint64_t u64Base;
873 {
874 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_NATURAL;
875 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
876 uint8_t const uWidthType = (uWidth << 2) | uType;
877 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCS_ENC_INDEX);
878 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
879 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
880 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
881 uint8_t const *pbField = pbVmcs + offField;
882 u64Base = *(uint64_t *)pbField;
883 /** @todo NSTVMX: Should we zero out high bits here for 32-bit virtual CPUs? */
884 }
885
886 /* Attributes. */
887 uint32_t u32Attr;
888 {
889 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
890 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
891 uint8_t const uWidthType = (uWidth << 2) | uType;
892 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCS_ENC_INDEX);
893 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
894 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
895 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
896 uint8_t const *pbField = pbVmcs + offField;
897 u32Attr = *(uint32_t *)pbField;
898 }
899
900 pSelReg->Sel = u16Sel;
901 pSelReg->ValidSel = u16Sel;
902 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
903 pSelReg->u32Limit = u32Limit;
904 pSelReg->u64Base = u64Base;
905 pSelReg->Attr.u = u32Attr;
906 return VINF_SUCCESS;
907}
908
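/*
 * Illustrative sketch (editorial addition): round-tripping a guest segment register
 * through the virtual VMCS with the two helpers above (pVmcs is assumed to point to
 * the current virtual VMCS):
 *
 *     CPUMSELREG SelReg;
 *     iemVmxVmcsSetGuestSegReg(pVmcs, X86_SREG_CS, &pVCpu->cpum.GstCtx.cs);
 *     int rc = iemVmxVmcsGetGuestSegReg(pVmcs, X86_SREG_CS, &SelReg);
 *     AssertRC(rc);    // always VINF_SUCCESS for iSegReg < X86_SREG_COUNT
 */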
909
910/**
911 * Gets a CR3 target value from the VMCS.
912 *
913 * @returns The CR3-target value.
914 *
915 * @param pVmcs Pointer to the virtual VMCS.
916 * @param idxCr3Target The index of the CR3-target value to retrieve.
917 */
918DECLINLINE(uint64_t) iemVmxVmcsGetCr3TargetValue(PCVMXVVMCS pVmcs, uint8_t idxCr3Target)
919{
920 Assert(idxCr3Target < VMX_V_CR3_TARGET_COUNT);
921 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_NATURAL;
922 uint8_t const uType = VMX_VMCS_ENC_TYPE_CONTROL;
923 uint8_t const uWidthType = (uWidth << 2) | uType;
924 uint8_t const uIndex = idxCr3Target + RT_BF_GET(VMX_VMCS_CTRL_CR3_TARGET_VAL0, VMX_BF_VMCS_ENC_INDEX);
925 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
926 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
927 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
928 uint8_t const *pbField = pbVmcs + offField;
929 uint64_t const uCr3TargetValue = *(uint64_t *)pbField;
930
931 return uCr3TargetValue;
932}
933
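/*
 * Illustrative sketch (editorial addition): the CR3-target values matter when deciding
 * whether a guest MOV to CR3 must cause a VM-exit; with the "CR3-load exiting" control
 * set, no exit is needed if the new value matches one of the first u32Cr3TargetCount
 * targets. uNewCr3 is a hypothetical local of the caller:
 *
 *     bool fMatch = false;
 *     for (uint8_t idx = 0; idx < VMX_V_CR3_TARGET_COUNT && idx < pVmcs->u32Cr3TargetCount; idx++)
 *         if (iemVmxVmcsGetCr3TargetValue(pVmcs, idx) == uNewCr3)
 *         {
 *             fMatch = true;
 *             break;
 *         }
 */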
934
935/**
936 * Converts an IEM exception event type to a VMX event type.
937 *
938 * @returns The VMX event type.
939 * @param uVector The interrupt / exception vector.
940 * @param fFlags The IEM event flag (see IEM_XCPT_FLAGS_XXX).
941 */
942DECLINLINE(uint8_t) iemVmxGetEventType(uint32_t uVector, uint32_t fFlags)
943{
944 /* Paranoia (callers may use these interchangeably). */
945 AssertCompile(VMX_EXIT_INT_INFO_TYPE_NMI == VMX_IDT_VECTORING_INFO_TYPE_NMI);
946 AssertCompile(VMX_EXIT_INT_INFO_TYPE_HW_XCPT == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT);
947 AssertCompile(VMX_EXIT_INT_INFO_TYPE_EXT_INT == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
948 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_XCPT == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT);
949 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_INT == VMX_IDT_VECTORING_INFO_TYPE_SW_INT);
950 AssertCompile(VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT);
951 AssertCompile(VMX_EXIT_INT_INFO_TYPE_NMI == VMX_ENTRY_INT_INFO_TYPE_NMI);
952 AssertCompile(VMX_EXIT_INT_INFO_TYPE_HW_XCPT == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT);
953 AssertCompile(VMX_EXIT_INT_INFO_TYPE_EXT_INT == VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
954 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_XCPT == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT);
955 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_INT == VMX_ENTRY_INT_INFO_TYPE_SW_INT);
956 AssertCompile(VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT);
957
958 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
959 {
960 if (uVector == X86_XCPT_NMI)
961 return VMX_EXIT_INT_INFO_TYPE_NMI;
962 return VMX_EXIT_INT_INFO_TYPE_HW_XCPT;
963 }
964
965 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
966 {
967 if (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR))
968 return VMX_EXIT_INT_INFO_TYPE_SW_XCPT;
969 if (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)
970 return VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT;
971 return VMX_EXIT_INT_INFO_TYPE_SW_INT;
972 }
973
974 Assert(fFlags & IEM_XCPT_FLAGS_T_EXT_INT);
975 return VMX_EXIT_INT_INFO_TYPE_EXT_INT;
976}
977
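/*
 * Illustrative sketch (editorial addition): an INT3-raised #BP (assuming the
 * IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR flag combination) maps to the
 * software-exception type, which the exit/IDT-vectoring information builders encode:
 *
 *     uint8_t const uType = iemVmxGetEventType(X86_XCPT_BP,
 *                                              IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR);
 *     Assert(uType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT);
 */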
978
979/**
980 * Sets the VM-instruction error VMCS field.
981 *
982 * @param pVCpu The cross context virtual CPU structure.
983 * @param enmInsErr The VM-instruction error.
984 */
985DECL_FORCE_INLINE(void) iemVmxVmcsSetVmInstrErr(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
986{
987 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
988 pVmcs->u32RoVmInstrError = enmInsErr;
989}
990
991
992/**
993 * Sets the VM-exit qualification VMCS field.
994 *
995 * @param pVCpu The cross context virtual CPU structure.
996 * @param uExitQual The VM-exit qualification.
997 */
998DECL_FORCE_INLINE(void) iemVmxVmcsSetExitQual(PVMCPU pVCpu, uint64_t uExitQual)
999{
1000 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1001 pVmcs->u64RoExitQual.u = uExitQual;
1002}
1003
1004
1005/**
1006 * Sets the VM-exit interruption information field.
1007 *
1008 * @param pVCpu The cross context virtual CPU structure.
1009 * @param uExitIntInfo The VM-exit interruption information.
1010 */
1011DECL_FORCE_INLINE(void) iemVmxVmcsSetExitIntInfo(PVMCPU pVCpu, uint32_t uExitIntInfo)
1012{
1013 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1014 pVmcs->u32RoExitIntInfo = uExitIntInfo;
1015}
1016
1017
1018/**
1019 * Sets the VM-exit interruption error code.
1020 *
1021 * @param pVCpu The cross context virtual CPU structure.
1022 * @param uErrCode The error code.
1023 */
1024DECL_FORCE_INLINE(void) iemVmxVmcsSetExitIntErrCode(PVMCPU pVCpu, uint32_t uErrCode)
1025{
1026 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1027 pVmcs->u32RoExitIntErrCode = uErrCode;
1028}
1029
1030
1031/**
1032 * Sets the IDT-vectoring information field.
1033 *
1034 * @param pVCpu The cross context virtual CPU structure.
1035 * @param uIdtVectorInfo The IDT-vectoring information.
1036 */
1037DECL_FORCE_INLINE(void) iemVmxVmcsSetIdtVectoringInfo(PVMCPU pVCpu, uint32_t uIdtVectorInfo)
1038{
1039 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1040 pVmcs->u32RoIdtVectoringInfo = uIdtVectorInfo;
1041}
1042
1043
1044/**
1045 * Sets the IDT-vectoring error code field.
1046 *
1047 * @param pVCpu The cross context virtual CPU structure.
1048 * @param uErrCode The error code.
1049 */
1050DECL_FORCE_INLINE(void) iemVmxVmcsSetIdtVectoringErrCode(PVMCPU pVCpu, uint32_t uErrCode)
1051{
1052 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1053 pVmcs->u32RoIdtVectoringErrCode = uErrCode;
1054}
1055
1056
1057/**
1058 * Sets the VM-exit guest-linear address VMCS field.
1059 *
1060 * @param pVCpu The cross context virtual CPU structure.
1061 * @param uGuestLinearAddr The VM-exit guest-linear address.
1062 */
1063DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestLinearAddr(PVMCPU pVCpu, uint64_t uGuestLinearAddr)
1064{
1065 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1066 pVmcs->u64RoGuestLinearAddr.u = uGuestLinearAddr;
1067}
1068
1069
1070/**
1071 * Sets the VM-exit guest-physical address VMCS field.
1072 *
1073 * @param pVCpu The cross context virtual CPU structure.
1074 * @param uGuestPhysAddr The VM-exit guest-physical address.
1075 */
1076DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestPhysAddr(PVMCPU pVCpu, uint64_t uGuestPhysAddr)
1077{
1078 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1079 pVmcs->u64RoGuestPhysAddr.u = uGuestPhysAddr;
1080}
1081
1082
1083/**
1084 * Sets the VM-exit instruction length VMCS field.
1085 *
1086 * @param pVCpu The cross context virtual CPU structure.
1087 * @param cbInstr The VM-exit instruction length in bytes.
1088 *
1089 * @remarks Callers may clear this field to 0. Hence, this function does not check
1090 * the validity of the instruction length.
1091 */
1092DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrLen(PVMCPU pVCpu, uint32_t cbInstr)
1093{
1094 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1095 pVmcs->u32RoExitInstrLen = cbInstr;
1096}
1097
1098
1099/**
1100 * Sets the VM-exit instruction info. VMCS field.
1101 *
1102 * @param pVCpu The cross context virtual CPU structure.
1103 * @param uExitInstrInfo The VM-exit instruction information.
1104 */
1105DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitInstrInfo)
1106{
1107 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1108 pVmcs->u32RoExitInstrInfo = uExitInstrInfo;
1109}
1110
1111
1112/**
1113 * Implements VMSucceed for VMX instruction success.
1114 *
1115 * @param pVCpu The cross context virtual CPU structure.
1116 */
1117DECL_FORCE_INLINE(void) iemVmxVmSucceed(PVMCPU pVCpu)
1118{
1119 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1120}
1121
1122
1123/**
1124 * Implements VMFailInvalid for VMX instruction failure.
1125 *
1126 * @param pVCpu The cross context virtual CPU structure.
1127 */
1128DECL_FORCE_INLINE(void) iemVmxVmFailInvalid(PVMCPU pVCpu)
1129{
1130 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1131 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_CF;
1132}
1133
1134
1135/**
1136 * Implements VMFailValid for VMX instruction failure.
1137 *
1138 * @param pVCpu The cross context virtual CPU structure.
1139 * @param enmInsErr The VM instruction error.
1140 */
1141DECL_FORCE_INLINE(void) iemVmxVmFailValid(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1142{
1143 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1144 {
1145 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1146 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_ZF;
1147 iemVmxVmcsSetVmInstrErr(pVCpu, enmInsErr);
1148 }
1149}
1150
1151
1152/**
1153 * Implements VMFail for VMX instruction failure.
1154 *
1155 * @param pVCpu The cross context virtual CPU structure.
1156 * @param enmInsErr The VM instruction error.
1157 */
1158DECL_FORCE_INLINE(void) iemVmxVmFail(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1159{
1160 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1161 iemVmxVmFailValid(pVCpu, enmInsErr);
1162 else
1163 iemVmxVmFailInvalid(pVCpu);
1164}
1165
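/*
 * Illustrative sketch (editorial addition): a VMX instruction implementation typically
 * ends by reporting its outcome through RFLAGS using the helpers above; the exact flow
 * varies per instruction and the condition below is hypothetical:
 *
 *     if (fEverythingOk)
 *         iemVmxVmSucceed(pVCpu);              // CF=PF=AF=ZF=SF=OF=0
 *     else
 *         iemVmxVmFail(pVCpu, enmInsErr);      // VMfailValid or VMfailInvalid
 *     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
 *     return VINF_SUCCESS;
 */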
1166
1167/**
1168 * Checks if the given auto-load/store MSR area count is valid for the
1169 * implementation.
1170 *
1171 * @returns @c true if it's within the valid limit, @c false otherwise.
1172 * @param pVCpu The cross context virtual CPU structure.
1173 * @param uMsrCount The MSR area count to check.
1174 */
1175DECL_FORCE_INLINE(bool) iemVmxIsAutoMsrCountValid(PVMCPU pVCpu, uint32_t uMsrCount)
1176{
1177 uint64_t const u64VmxMiscMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Misc;
1178 uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(u64VmxMiscMsr);
1179 Assert(cMaxSupportedMsrs <= VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
1180 if (uMsrCount <= cMaxSupportedMsrs)
1181 return true;
1182 return false;
1183}
1184
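/*
 * Illustrative sketch (editorial addition): VM-entry checking would validate the
 * MSR-load/store counts read from the VMCS with this helper (the diagnostic
 * enumerator name below is assumed, not taken from the original source):
 *
 *     if (!iemVmxIsAutoMsrCountValid(pVCpu, pVmcs->u32EntryMsrLoadCount))
 *         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryMsrLoadCount);
 */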
1185
1186/**
1187 * Flushes the current VMCS contents back to guest memory.
1188 *
1189 * @returns VBox status code.
1190 * @param pVCpu The cross context virtual CPU structure.
1191 */
1192DECL_FORCE_INLINE(int) iemVmxCommitCurrentVmcsToMemory(PVMCPU pVCpu)
1193{
1194 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
1195 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), IEM_VMX_GET_CURRENT_VMCS(pVCpu),
1196 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs), sizeof(VMXVVMCS));
1197 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
1198 return rc;
1199}
1200
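/*
 * Illustrative sketch (editorial addition): callers that must give up the cached VMCS
 * copy (e.g. when the current VMCS is being replaced or VMX operation is being left)
 * are expected to commit it back to guest memory like this:
 *
 *     int rc = iemVmxCommitCurrentVmcsToMemory(pVCpu);
 *     AssertRCReturn(rc, rc);
 *     Assert(!IEM_VMX_HAS_CURRENT_VMCS(pVCpu));    // the commit helper clears the current-VMCS ptr
 */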
1201
1202/**
1203 * Implements VMSucceed for the VMREAD instruction and increments the guest RIP.
1204 *
1205 * @param pVCpu The cross context virtual CPU structure.
1206 */
1207DECL_FORCE_INLINE(void) iemVmxVmreadSuccess(PVMCPU pVCpu, uint8_t cbInstr)
1208{
1209 iemVmxVmSucceed(pVCpu);
1210 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1211}
1212
1213
1214/**
1215 * Gets the instruction diagnostic for segment base checks during VM-entry of a
1216 * nested-guest.
1217 *
1218 * @param iSegReg The segment index (X86_SREG_XXX).
1219 */
1220IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBase(unsigned iSegReg)
1221{
1222 switch (iSegReg)
1223 {
1224 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseCs;
1225 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseDs;
1226 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseEs;
1227 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseFs;
1228 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseGs;
1229 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseSs;
1230 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_1);
1231 }
1232}
1233
1234
1235/**
1236 * Gets the instruction diagnostic for segment base checks during VM-entry of a
1237 * nested-guest that is in Virtual-8086 mode.
1238 *
1239 * @param iSegReg The segment index (X86_SREG_XXX).
1240 */
1241IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBaseV86(unsigned iSegReg)
1242{
1243 switch (iSegReg)
1244 {
1245 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseV86Cs;
1246 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ds;
1247 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseV86Es;
1248 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseV86Fs;
1249 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseV86Gs;
1250 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ss;
1251 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
1252 }
1253}
1254
1255
1256/**
1257 * Gets the instruction diagnostic for segment limit checks during VM-entry of a
1258 * nested-guest that is in Virtual-8086 mode.
1259 *
1260 * @param iSegReg The segment index (X86_SREG_XXX).
1261 */
1262IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegLimitV86(unsigned iSegReg)
1263{
1264 switch (iSegReg)
1265 {
1266 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegLimitV86Cs;
1267 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ds;
1268 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegLimitV86Es;
1269 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegLimitV86Fs;
1270 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegLimitV86Gs;
1271 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ss;
1272 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_3);
1273 }
1274}
1275
1276
1277/**
1278 * Gets the instruction diagnostic for segment attribute checks during VM-entry of a
1279 * nested-guest that is in Virtual-8086 mode.
1280 *
1281 * @param iSegReg The segment index (X86_SREG_XXX).
1282 */
1283IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrV86(unsigned iSegReg)
1284{
1285 switch (iSegReg)
1286 {
1287 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrV86Cs;
1288 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ds;
1289 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrV86Es;
1290 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrV86Fs;
1291 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrV86Gs;
1292 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ss;
1293 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_4);
1294 }
1295}
1296
1297
1298/**
1299 * Gets the instruction diagnostic for segment attributes reserved bits failure
1300 * during VM-entry of a nested-guest.
1301 *
1302 * @param iSegReg The segment index (X86_SREG_XXX).
1303 */
1304IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrRsvd(unsigned iSegReg)
1305{
1306 switch (iSegReg)
1307 {
1308 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdCs;
1309 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdDs;
1310 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrRsvdEs;
1311 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdFs;
1312 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdGs;
1313 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdSs;
1314 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_5);
1315 }
1316}
1317
1318
1319/**
1320 * Gets the instruction diagnostic for segment attributes descriptor-type
1321 * (code/data or system) failure during VM-entry of a nested-guest.
1322 *
1323 * @param iSegReg The segment index (X86_SREG_XXX).
1324 */
1325IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDescType(unsigned iSegReg)
1326{
1327 switch (iSegReg)
1328 {
1329 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeCs;
1330 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeDs;
1331 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeEs;
1332 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeFs;
1333 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeGs;
1334 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeSs;
1335 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_6);
1336 }
1337}
1338
1339
1340/**
1341 * Gets the instruction diagnostic for segment attribute present-bit (P) failure
1342 * during VM-entry of a nested-guest.
1343 *
1344 * @param iSegReg The segment index (X86_SREG_XXX).
1345 */
1346IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrPresent(unsigned iSegReg)
1347{
1348 switch (iSegReg)
1349 {
1350 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrPresentCs;
1351 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrPresentDs;
1352 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrPresentEs;
1353 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrPresentFs;
1354 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrPresentGs;
1355 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrPresentSs;
1356 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_7);
1357 }
1358}
1359
1360
1361/**
1362 * Gets the instruction diagnostic for segment attribute granularity failure during
1363 * VM-entry of a nested-guest.
1364 *
1365 * @param iSegReg The segment index (X86_SREG_XXX).
1366 */
1367IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrGran(unsigned iSegReg)
1368{
1369 switch (iSegReg)
1370 {
1371 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrGranCs;
1372 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrGranDs;
1373 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrGranEs;
1374 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrGranFs;
1375 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrGranGs;
1376 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrGranSs;
1377 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_8);
1378 }
1379}
1380
1381/**
1382 * Gets the instruction diagnostic for segment attribute DPL/RPL failure during
1383 * VM-entry of a nested-guest.
1384 *
1385 * @param iSegReg The segment index (X86_SREG_XXX).
1386 */
1387IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDplRpl(unsigned iSegReg)
1388{
1389 switch (iSegReg)
1390 {
1391 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplCs;
1392 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplDs;
1393 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDplRplEs;
1394 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplFs;
1395 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplGs;
1396 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplSs;
1397 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_9);
1398 }
1399}
1400
1401
1402/**
1403 * Gets the instruction diagnostic for segment attribute type accessed failure
1404 * during VM-entry of a nested-guest.
1405 *
1406 * @param iSegReg The segment index (X86_SREG_XXX).
1407 */
1408IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrTypeAcc(unsigned iSegReg)
1409{
1410 switch (iSegReg)
1411 {
1412 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccCs;
1413 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccDs;
1414 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccEs;
1415 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccFs;
1416 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccGs;
1417 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccSs;
1418 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_10);
1419 }
1420}
1421
1422
1423/**
1424 * Gets the instruction diagnostic for guest CR3 referenced PDPTE reserved bits
1425 * failure during VM-entry of a nested-guest.
1426 *
1427 * @param iPdpte The PDPTE entry index.
1428 */
1429IEM_STATIC VMXVDIAG iemVmxGetDiagVmentryPdpteRsvd(unsigned iPdpte)
1430{
1431 Assert(iPdpte < X86_PG_PAE_PDPE_ENTRIES);
1432 switch (iPdpte)
1433 {
1434 case 0: return kVmxVDiag_Vmentry_GuestPdpte0Rsvd;
1435 case 1: return kVmxVDiag_Vmentry_GuestPdpte1Rsvd;
1436 case 2: return kVmxVDiag_Vmentry_GuestPdpte2Rsvd;
1437 case 3: return kVmxVDiag_Vmentry_GuestPdpte3Rsvd;
1438 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_11);
1439 }
1440}
1441
1442
1443/**
1444 * Gets the instruction diagnostic for host CR3 referenced PDPTE reserved bits
1445 * failure during VM-exit of a nested-guest.
1446 *
1447 * @param iPdpte The PDPTE entry index.
1448 */
1449IEM_STATIC VMXVDIAG iemVmxGetDiagVmexitPdpteRsvd(unsigned iPdpte)
1450{
1451 Assert(iPdpte < X86_PG_PAE_PDPE_ENTRIES);
1452 switch (iPdpte)
1453 {
1454 case 0: return kVmxVDiag_Vmexit_HostPdpte0Rsvd;
1455 case 1: return kVmxVDiag_Vmexit_HostPdpte1Rsvd;
1456 case 2: return kVmxVDiag_Vmexit_HostPdpte2Rsvd;
1457 case 3: return kVmxVDiag_Vmexit_HostPdpte3Rsvd;
1458 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_12);
1459 }
1460}
1461
1462
1463/**
1464 * Masks the nested-guest CR0/CR4 using the corresponding guest/host mask and the
1465 * CR0/CR4 read shadow, yielding the value the nested-guest observes when reading CR0/CR4.
1466 *
1467 * @returns The masked CR0/CR4.
1468 * @param pVCpu The cross context virtual CPU structure.
1469 * @param iCrReg The control register (either CR0 or CR4).
1470 * @param uGuestCrX The current guest CR0 or guest CR4.
1471 */
1472IEM_STATIC uint64_t iemVmxMaskCr0CR4(PVMCPU pVCpu, uint8_t iCrReg, uint64_t uGuestCrX)
1473{
1474 Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
1475 Assert(iCrReg == 0 || iCrReg == 4);
1476
1477 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1478 Assert(pVmcs);
1479
1480 /*
1481 * For each CR0 or CR4 bit owned by the host, the corresponding bit is loaded from the
1482 * CR0 read shadow or CR4 read shadow. For each CR0 or CR4 bit that is not owned by the
1483 * host, the corresponding bit from the guest CR0 or guest CR4 is loaded.
1484 *
1485 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
1486 */
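 /* For instance (illustrative values only): if the CR0 guest/host mask were X86_CR0_CD | X86_CR0_NW (0x60000000),
    the CR0 read shadow 0 and the current guest CR0 0x60000011, the masked value would be
    (0 & 0x60000000) | (0x60000011 & ~0x60000000) = 0x00000011, i.e. CD/NW are taken from the read shadow
    while the remaining bits are taken from the guest CR0. */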
1487 uint64_t fGstHostMask;
1488 uint64_t fReadShadow;
1489 if (iCrReg == 0)
1490 {
1491 fGstHostMask = pVmcs->u64Cr0Mask.u;
1492 fReadShadow = pVmcs->u64Cr0ReadShadow.u;
1493 }
1494 else
1495 {
1496 fGstHostMask = pVmcs->u64Cr4Mask.u;
1497 fReadShadow = pVmcs->u64Cr4ReadShadow.u;
1498 }
1499
1500 uint64_t const fMaskedCrX = (fReadShadow & fGstHostMask) | (uGuestCrX & ~fGstHostMask);
1501 return fMaskedCrX;
1502}
1503
1504
1505/**
1506 * Saves the guest control registers, debug registers and some MSRs as part of
1507 * VM-exit.
1508 *
1509 * @param pVCpu The cross context virtual CPU structure.
1510 */
1511IEM_STATIC void iemVmxVmexitSaveGuestControlRegsMsrs(PVMCPU pVCpu)
1512{
1513 /*
1514 * Saves the guest control registers, debug registers and some MSRs.
1515 * See Intel spec. 27.3.1 "Saving Control Registers, Debug Registers and MSRs".
1516 */
1517 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1518
1519 /* Save control registers. */
1520 pVmcs->u64GuestCr0.u = pVCpu->cpum.GstCtx.cr0;
1521 pVmcs->u64GuestCr3.u = pVCpu->cpum.GstCtx.cr3;
1522 pVmcs->u64GuestCr4.u = pVCpu->cpum.GstCtx.cr4;
1523
1524 /* Save SYSENTER CS, ESP, EIP. */
1525 pVmcs->u32GuestSysenterCS = pVCpu->cpum.GstCtx.SysEnter.cs;
1526 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1527 {
1528 pVmcs->u64GuestSysenterEsp.u = pVCpu->cpum.GstCtx.SysEnter.esp;
1529 pVmcs->u64GuestSysenterEip.u = pVCpu->cpum.GstCtx.SysEnter.eip;
1530 }
1531 else
1532 {
1533 pVmcs->u64GuestSysenterEsp.s.Lo = pVCpu->cpum.GstCtx.SysEnter.esp;
1534 pVmcs->u64GuestSysenterEip.s.Lo = pVCpu->cpum.GstCtx.SysEnter.eip;
1535 }
1536
1537 /* Save debug registers (DR7 and IA32_DEBUGCTL MSR). */
1538 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_DEBUG)
1539 {
1540 pVmcs->u64GuestDr7.u = pVCpu->cpum.GstCtx.dr[7];
1541 /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
1542 }
1543
1544 /* Save PAT MSR. */
1545 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PAT_MSR)
1546 pVmcs->u64GuestPatMsr.u = pVCpu->cpum.GstCtx.msrPAT;
1547
1548 /* Save EFER MSR. */
1549 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_EFER_MSR)
1550 pVmcs->u64GuestEferMsr.u = pVCpu->cpum.GstCtx.msrEFER;
1551
1552 /* We don't support clearing IA32_BNDCFGS MSR yet. */
1553 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR));
1554
1555 /* Nothing to do for SMBASE register - We don't support SMM yet. */
1556}
1557
1558
1559/**
1560 * Saves the guest force-flags in preparation for entering the nested-guest.
1561 *
1562 * @param pVCpu The cross context virtual CPU structure.
1563 */
1564IEM_STATIC void iemVmxVmentrySaveForceFlags(PVMCPU pVCpu)
1565{
1566 /* We shouldn't be called multiple times during VM-entry. */
1567 Assert(pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions == 0);
1568
1569 /* MTF should not be set outside VMX non-root mode. */
1570 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
1571
1572 /*
1573 * Preserve the required force-flags.
1574 *
1575 * We cache and clear force-flags that would affect the execution of the
1576 * nested-guest. Cached flags are then restored while returning to the guest
1577 * if necessary.
1578 *
1579 * - VMCPU_FF_INHIBIT_INTERRUPTS need not be cached as it only affects
1580 * interrupts until the completion of the current VMLAUNCH/VMRESUME
1581 * instruction. Interrupt inhibition for any nested-guest instruction
1582 * is supplied by the guest-interruptibility state VMCS field and will
1583 * be set up as part of loading the guest state.
1584 *
1585 * - VMCPU_FF_BLOCK_NMIS needs to be cached as VM-exits caused before
1586 * successful VM-entry (due to invalid guest-state) need to continue
1587 * blocking NMIs if it was in effect before VM-entry.
1588 *
1589 * - MTF need not be preserved as it's used only in VMX non-root mode and
1590 * is supplied through the VM-execution controls.
1591 *
1592 * The remaining FFs (e.g. timers, APIC updates) can stay in place so that
1593 * we will be able to generate interrupts that may cause VM-exits for
1594 * the nested-guest.
1595 */
1596 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;
1597}
1598
1599
1600/**
1601 * Restores the guest force-flags in preparation for exiting the nested-guest.
1602 *
1603 * @param pVCpu The cross context virtual CPU structure.
1604 */
1605IEM_STATIC void iemVmxVmexitRestoreForceFlags(PVMCPU pVCpu)
1606{
1607 if (pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions)
1608 {
1609 VMCPU_FF_SET_MASK(pVCpu, pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions);
1610 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = 0;
1611 }
1612}
1613
1614
1615/**
1616 * Performs a VMX transition, updating PGM, IEM and CPUM.
1617 *
1618 * @param pVCpu The cross context virtual CPU structure.
1619 */
1620IEM_STATIC int iemVmxWorldSwitch(PVMCPU pVCpu)
1621{
1622 /*
1623 * Inform PGM about paging mode changes.
1624 * We include X86_CR0_PE because PGM doesn't handle paged-real mode yet,
1625 * see comment in iemMemPageTranslateAndCheckAccess().
1626 */
1627 int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0 | X86_CR0_PE, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
1628# ifdef IN_RING3
1629 Assert(rc != VINF_PGM_CHANGE_MODE);
1630# endif
1631 AssertRCReturn(rc, rc);
1632
1633 /* Inform CPUM (recompiler), can later be removed. */
1634 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1635
1636 /*
1637 * Flush the TLB with new CR3. This is required in case the PGM mode change
1638 * above doesn't actually change anything.
1639 */
1640 if (rc == VINF_SUCCESS)
1641 {
1642 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true);
1643 AssertRCReturn(rc, rc);
1644 }
1645
1646 /* Re-initialize IEM cache/state after the drastic mode switch. */
1647 iemReInitExec(pVCpu);
1648 return rc;
1649}
1650
1651
1652/**
1653 * Calculates the current VMX-preemption timer value.
1654 *
1655 * @param pVCpu The cross context virtual CPU structure.
1656 */
1657IEM_STATIC uint32_t iemVmxCalcPreemptTimer(PVMCPU pVCpu)
1658{
1659 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1660 Assert(pVmcs);
1661
1662 /*
1663 * Assume the following:
1664 * PreemptTimerShift = 5
1665 * VmcsPreemptTimer = 2 (i.e. decrement by 1 every VmcsPreemptTimer * RT_BIT(PreemptTimerShift) TSC ticks; assume that works out to 20000 ticks for this illustration)
1666 * VmentryTick = 50000 (TSC at time of VM-entry)
1667 *
1668 * CurTick Delta PreemptTimerVal
1669 * ----------------------------------
1670 * 60000 10000 2
1671 * 80000 30000 1
1672 * 90000 40000 0 -> VM-exit.
1673 *
1674 * If Delta >= VmcsPreemptTimer * RT_BIT(PreemptTimerShift), a VMX-preemption timer VM-exit occurs.
1675 * The saved VMX-preemption timer value is calculated as follows:
1676 * PreemptTimerVal = VmcsPreemptTimer - (Delta / (VmcsPreemptTimer * RT_BIT(PreemptTimerShift)))
1677 * E.g. (integer division):
1678 * Delta = 10000
1679 * Tmp = 10000 / 20000 = 0
1680 * NewPt = 2 - 0 = 2
1681 * Delta = 30000
1682 * Tmp = 30000 / 20000 = 1
1683 * NewPt = 2 - 1 = 1
1684 * Delta = 40000
1685 * Tmp = 40000 / 20000 = 2
1686 * NewPt = 2 - 2 = 0
1687 */
1688 uint64_t const uCurTick = TMCpuTickGetNoCheck(pVCpu);
1689 uint64_t const uVmentryTick = pVCpu->cpum.GstCtx.hwvirt.vmx.uVmentryTick;
1690 uint64_t const uDelta = uCurTick - uVmentryTick;
1691 uint32_t const uVmcsPreemptVal = pVmcs->u32PreemptTimer;
1692 uint32_t const uPreemptTimer = uVmcsPreemptVal
1693 - ASMDivU64ByU32RetU32(uDelta, uVmcsPreemptVal * RT_BIT(VMX_V_PREEMPT_TIMER_SHIFT));
1694 return uPreemptTimer;
1695}
1696
1697
1698/**
1699 * Saves guest segment registers, GDTR, IDTR, LDTR, TR as part of VM-exit.
1700 *
1701 * @param pVCpu The cross context virtual CPU structure.
1702 */
1703IEM_STATIC void iemVmxVmexitSaveGuestSegRegs(PVMCPU pVCpu)
1704{
1705 /*
1706 * Save guest segment registers, GDTR, IDTR, LDTR, TR.
1707 * See Intel spec 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
1708 */
1709 /* CS, SS, ES, DS, FS, GS. */
1710 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1711 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
1712 {
1713 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
1714 if (!pSelReg->Attr.n.u1Unusable)
1715 iemVmxVmcsSetGuestSegReg(pVmcs, iSegReg, pSelReg);
1716 else
1717 {
1718 /*
1719 * For unusable segments the attributes are undefined except for CS and SS.
1720 * For the rest we don't bother preserving anything but the unusable bit.
1721 */
1722 switch (iSegReg)
1723 {
1724 case X86_SREG_CS:
1725 pVmcs->GuestCs = pSelReg->Sel;
1726 pVmcs->u64GuestCsBase.u = pSelReg->u64Base;
1727 pVmcs->u32GuestCsLimit = pSelReg->u32Limit;
1728 pVmcs->u32GuestCsAttr = pSelReg->Attr.u & ( X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
1729 | X86DESCATTR_UNUSABLE);
1730 break;
1731
1732 case X86_SREG_SS:
1733 pVmcs->GuestSs = pSelReg->Sel;
1734 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1735 pVmcs->u64GuestSsBase.u &= UINT32_C(0xffffffff);
1736 pVmcs->u32GuestSsAttr = pSelReg->Attr.u & (X86DESCATTR_DPL | X86DESCATTR_UNUSABLE);
1737 break;
1738
1739 case X86_SREG_DS:
1740 pVmcs->GuestDs = pSelReg->Sel;
1741 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1742 pVmcs->u64GuestDsBase.u &= UINT32_C(0xffffffff);
1743 pVmcs->u32GuestDsAttr = X86DESCATTR_UNUSABLE;
1744 break;
1745
1746 case X86_SREG_ES:
1747 pVmcs->GuestEs = pSelReg->Sel;
1748 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1749 pVmcs->u64GuestEsBase.u &= UINT32_C(0xffffffff);
1750 pVmcs->u32GuestEsAttr = X86DESCATTR_UNUSABLE;
1751 break;
1752
1753 case X86_SREG_FS:
1754 pVmcs->GuestFs = pSelReg->Sel;
1755 pVmcs->u64GuestFsBase.u = pSelReg->u64Base;
1756 pVmcs->u32GuestFsAttr = X86DESCATTR_UNUSABLE;
1757 break;
1758
1759 case X86_SREG_GS:
1760 pVmcs->GuestGs = pSelReg->Sel;
1761 pVmcs->u64GuestGsBase.u = pSelReg->u64Base;
1762 pVmcs->u32GuestGsAttr = X86DESCATTR_UNUSABLE;
1763 break;
1764 }
1765 }
1766 }
1767
1768 /* Segment attribute bits 31:17 and 11:8 MBZ. */
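 /* That is, only the type (3:0), S (4), DPL (6:5), P (7), AVL (12), L (13), D/B (14), G (15) and
    unusable (16) bits are retained by the mask below. */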
1769 uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT | X86DESCATTR_DPL | X86DESCATTR_P
1770 | X86DESCATTR_AVL | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_UNUSABLE;
1771 /* LDTR. */
1772 {
1773 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.ldtr;
1774 pVmcs->GuestLdtr = pSelReg->Sel;
1775 pVmcs->u64GuestLdtrBase.u = pSelReg->u64Base;
1776 Assert(X86_IS_CANONICAL(pSelReg->u64Base));
1777 pVmcs->u32GuestLdtrLimit = pSelReg->u32Limit;
1778 pVmcs->u32GuestLdtrAttr = pSelReg->Attr.u & fValidAttrMask;
1779 }
1780
1781 /* TR. */
1782 {
1783 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.tr;
1784 pVmcs->GuestTr = pSelReg->Sel;
1785 pVmcs->u64GuestTrBase.u = pSelReg->u64Base;
1786 pVmcs->u32GuestTrLimit = pSelReg->u32Limit;
1787 pVmcs->u32GuestTrAttr = pSelReg->Attr.u & fValidAttrMask;
1788 }
1789
1790 /* GDTR. */
1791 pVmcs->u64GuestGdtrBase.u = pVCpu->cpum.GstCtx.gdtr.pGdt;
1792 pVmcs->u32GuestGdtrLimit = pVCpu->cpum.GstCtx.gdtr.cbGdt;
1793
1794 /* IDTR. */
1795 pVmcs->u64GuestIdtrBase.u = pVCpu->cpum.GstCtx.idtr.pIdt;
1796 pVmcs->u32GuestIdtrLimit = pVCpu->cpum.GstCtx.idtr.cbIdt;
1797}
1798
1799
1800/**
1801 * Saves guest non-register state as part of VM-exit.
1802 *
1803 * @param pVCpu The cross context virtual CPU structure.
1804 * @param uExitReason The VM-exit reason.
1805 */
1806IEM_STATIC void iemVmxVmexitSaveGuestNonRegState(PVMCPU pVCpu, uint32_t uExitReason)
1807{
1808 /*
1809 * Save guest non-register state.
1810 * See Intel spec. 27.3.4 "Saving Non-Register State".
1811 */
1812 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1813
1814 /*
1815 * Activity state.
1816 * Most VM-exits will occur in the active state. However, if the first instruction
1817 * following the VM-entry is a HLT instruction, and the MTF VM-execution control is set,
1818 * the VM-exit will be from the HLT activity state.
1819 *
1820 * See Intel spec. 25.5.2 "Monitor Trap Flag".
1821 */
1822 /** @todo NSTVMX: Does triple-fault VM-exit reflect a shutdown activity state or
1823 * not? */
1824 EMSTATE enmActivityState = EMGetState(pVCpu);
1825 switch (enmActivityState)
1826 {
1827 case EMSTATE_HALTED: pVmcs->u32GuestActivityState = VMX_VMCS_GUEST_ACTIVITY_HLT; break;
1828 default: pVmcs->u32GuestActivityState = VMX_VMCS_GUEST_ACTIVITY_ACTIVE; break;
1829 }
1830
1831 /* Interruptibility-state. */
1832 pVmcs->u32GuestIntrState = 0;
1833 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1834 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1835
1836 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1837 && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
1838 {
1839 /** @todo NSTVMX: We can't distinguish between blocking-by-MovSS and blocking-by-STI
1840 * currently. */
1841 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1842 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1843 }
1844 /* Nothing to do for SMI/enclave. We don't support enclaves or SMM yet. */
1845
1846 /*
1847 * Pending debug exceptions.
1848 */
1849 if ( uExitReason != VMX_EXIT_INIT_SIGNAL
1850 && uExitReason != VMX_EXIT_SMI
1851 && uExitReason != VMX_EXIT_ERR_MACHINE_CHECK
1852 && !HMVmxIsVmexitTrapLike(uExitReason))
1853 {
1854 /** @todo NSTVMX: also must exclude VM-exits caused by debug exceptions when
1855 * block-by-MovSS is in effect. */
1856 pVmcs->u64GuestPendingDbgXcpt.u = 0;
1857 }
1858 else
1859 {
1860 /*
1861 * Pending debug exception field is identical to DR6 except the RTM bit (16) which needs to be flipped.
1862 * The "enabled breakpoint" bit (12) is not present in DR6, so we need to update it here.
1863 *
1864 * See Intel spec. 24.4.2 "Guest Non-Register State".
1865 */
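 /* For instance (illustrative value): with DR6 = 0x10001 (B0 and bit 16 set), setting the enabled-breakpoint
    bit (12) gives 0x11001 and flipping the RTM bit (16) yields a pending debug-exception value of 0x01001. */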
1866 uint64_t fPendingDbgMask = pVCpu->cpum.GstCtx.dr[6];
1867 uint64_t const fBpHitMask = VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BP0 | VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BP1
1868 | VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BP2 | VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BP3;
1869 if (fPendingDbgMask & fBpHitMask)
1870 fPendingDbgMask |= VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_EN_BP;
1871 fPendingDbgMask ^= VMX_VMCS_GUEST_PENDING_DEBUG_RTM;
1872 pVmcs->u64GuestPendingDbgXcpt.u = fPendingDbgMask;
1873 }
1874
1875 /*
1876 * Save the VMX-preemption timer value back into the VMCS if the feature is enabled.
1877 *
1878 * For VMX-preemption timer VM-exits, we should have already written 0 back into the
1879 * VMCS (provided the feature is supported), and thus there is nothing further to do here.
1880 */
1881 if ( uExitReason != VMX_EXIT_PREEMPT_TIMER
1882 && (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
1883 pVmcs->u32PreemptTimer = iemVmxCalcPreemptTimer(pVCpu);
1884
1885 /* PDPTEs. */
1886 /* We don't support EPT yet. */
1887 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT));
1888 pVmcs->u64GuestPdpte0.u = 0;
1889 pVmcs->u64GuestPdpte1.u = 0;
1890 pVmcs->u64GuestPdpte2.u = 0;
1891 pVmcs->u64GuestPdpte3.u = 0;
1892}
1893
1894
1895/**
1896 * Saves the guest-state as part of VM-exit.
1897 *
1899 * @param pVCpu The cross context virtual CPU structure.
1900 * @param uExitReason The VM-exit reason.
1901 */
1902IEM_STATIC void iemVmxVmexitSaveGuestState(PVMCPU pVCpu, uint32_t uExitReason)
1903{
1904 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1905 Assert(pVmcs);
1906
1907 iemVmxVmexitSaveGuestControlRegsMsrs(pVCpu);
1908 iemVmxVmexitSaveGuestSegRegs(pVCpu);
1909
1910 /** @todo r=ramshankar: The below hack is no longer necessary because we invoke the
1911 * VM-exit after updating RIP. I'm leaving it in-place temporarily in case
1912 * we need to fix missing exit information or callers still setting
1913 * instruction-length field when it is not necessary. */
1914#if 0
1915 /*
1916 * Save guest RIP, RSP and RFLAGS.
1917 * See Intel spec. 27.3.3 "Saving RIP, RSP and RFLAGS".
1918 *
1919 * For trap-like VM-exits we must advance the RIP by the length of the instruction.
1920 * Callers must pass the instruction length in the VM-exit instruction length
1921 * field though it is undefined for such VM-exits. After updating RIP here, we clear
1922 * the VM-exit instruction length field.
1923 *
1924 * See Intel spec. 27.1 "Architectural State Before A VM Exit"
1925 */
1926 if (HMVmxIsTrapLikeVmexit(uExitReason))
1927 {
1928 uint8_t const cbInstr = pVmcs->u32RoExitInstrLen;
1929 AssertMsg(cbInstr >= 1 && cbInstr <= 15, ("uReason=%u cbInstr=%u\n", uExitReason, cbInstr));
1930 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1931 iemVmxVmcsSetExitInstrLen(pVCpu, 0 /* cbInstr */);
1932 }
1933#endif
1934
1935 /* We don't support enclave mode yet. */
1936 pVmcs->u64GuestRip.u = pVCpu->cpum.GstCtx.rip;
1937 pVmcs->u64GuestRsp.u = pVCpu->cpum.GstCtx.rsp;
1938 pVmcs->u64GuestRFlags.u = pVCpu->cpum.GstCtx.rflags.u; /** @todo NSTVMX: Check RFLAGS.RF handling. */
1939
1940 iemVmxVmexitSaveGuestNonRegState(pVCpu, uExitReason);
1941}
1942
1943
1944/**
1945 * Saves the guest MSRs into the VM-exit auto-store MSRs area as part of VM-exit.
1946 *
1947 * @returns VBox status code.
1948 * @param pVCpu The cross context virtual CPU structure.
1949 * @param uExitReason The VM-exit reason (for diagnostic purposes).
1950 */
1951IEM_STATIC int iemVmxVmexitSaveGuestAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason)
1952{
1953 /*
1954 * Save guest MSRs.
1955 * See Intel spec. 27.4 "Saving MSRs".
1956 */
1957 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1958 const char *const pszFailure = "VMX-abort";
1959
1960 /*
1961 * The VM-exit MSR-store area address need not be a valid guest-physical address if the
1962 * VM-exit MSR-store count is 0. If this is the case, bail early without reading it.
1963 * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
1964 */
1965 uint32_t const cMsrs = pVmcs->u32ExitMsrStoreCount;
1966 if (!cMsrs)
1967 return VINF_SUCCESS;
1968
1969 /*
1970 * Verify the MSR auto-store count. Physical CPUs can behave unpredictably if the count
1971 * is exceeded including possibly raising #MC exceptions during VMX transition. Our
1972 * implementation causes a VMX-abort followed by a triple-fault.
1973 */
1974 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
1975 if (fIsMsrCountValid)
1976 { /* likely */ }
1977 else
1978 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreCount);
1979
1980 PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
1981 Assert(pMsr);
1982 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
1983 {
1984 if ( !pMsr->u32Reserved
1985 && pMsr->u32Msr != MSR_IA32_SMBASE
1986 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
1987 {
1988 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pMsr->u32Msr, &pMsr->u64Value);
1989 if (rcStrict == VINF_SUCCESS)
1990 continue;
1991
1992 /*
1993 * If we're in ring-0, we cannot handle returns to ring-3 at this point and still continue the VM-exit.
1994 * If any MSR that the guest hypervisor wants stored requires ring-3 handling, we cause a VMX-abort,
1995 * recording the MSR index in the auxiliary info. field and indicating it further with our own,
1996 * specific diagnostic code. Later, we can try to implement handling of the MSR in ring-0 if
1997 * possible, or come up with a better, generic solution.
1998 */
1999 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
2000 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_READ
2001 ? kVmxVDiag_Vmexit_MsrStoreRing3
2002 : kVmxVDiag_Vmexit_MsrStore;
2003 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
2004 }
2005 else
2006 {
2007 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
2008 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreRsvd);
2009 }
2010 }
2011
2012 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrStore.u;
2013 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysAutoMsrArea,
2014 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea), VMX_V_AUTOMSR_AREA_SIZE);
2015 if (RT_SUCCESS(rc))
2016 { /* likely */ }
2017 else
2018 {
2019 AssertMsgFailed(("VM-exit: Failed to write MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
2020 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrWritePhys);
2021 }
2022
2023 NOREF(uExitReason);
2024 NOREF(pszFailure);
2025 return VINF_SUCCESS;
2026}
2027
2028
2029/**
2030 * Performs a VMX abort (due to a fatal error during VM-exit).
2031 *
2032 * @returns Strict VBox status code.
2033 * @param pVCpu The cross context virtual CPU structure.
2034 * @param enmAbort The VMX abort reason.
2035 */
2036IEM_STATIC VBOXSTRICTRC iemVmxAbort(PVMCPU pVCpu, VMXABORT enmAbort)
2037{
2038 /*
2039 * Perform the VMX abort.
2040 * See Intel spec. 27.7 "VMX Aborts".
2041 */
2042 LogFunc(("enmAbort=%u (%s) -> RESET\n", enmAbort, HMVmxGetAbortDesc(enmAbort)));
2043
2044 /* We don't support SMX yet. */
2045 pVCpu->cpum.GstCtx.hwvirt.vmx.enmAbort = enmAbort;
2046 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
2047 {
2048 RTGCPHYS const GCPhysVmcs = IEM_VMX_GET_CURRENT_VMCS(pVCpu);
2049 uint32_t const offVmxAbort = RT_UOFFSETOF(VMXVVMCS, u32VmxAbortId);
2050 PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + offVmxAbort, &enmAbort, sizeof(enmAbort));
2051 }
2052
2053 return VINF_EM_TRIPLE_FAULT;
2054}
2055
2056
2057/**
2058 * Loads host control registers, debug registers and MSRs as part of VM-exit.
2059 *
2060 * @param pVCpu The cross context virtual CPU structure.
2061 */
2062IEM_STATIC void iemVmxVmexitLoadHostControlRegsMsrs(PVMCPU pVCpu)
2063{
2064 /*
2065 * Load host control registers, debug registers and MSRs.
2066 * See Intel spec. 27.5.1 "Loading Host Control Registers, Debug Registers, MSRs".
2067 */
2068 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2069 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2070
2071 /* CR0. */
2072 {
2073 /* Bits 63:32, 28:19, 17, 15:6, ET, CD, NW and CR0 MB1 bits are not modified. */
2074 uint64_t const uCr0Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
2075 uint64_t const fCr0IgnMask = UINT64_C(0xffffffff1ff8ffc0) | X86_CR0_ET | X86_CR0_CD | X86_CR0_NW | uCr0Fixed0;
2076 uint64_t const uHostCr0 = pVmcs->u64HostCr0.u;
2077 uint64_t const uGuestCr0 = pVCpu->cpum.GstCtx.cr0;
2078 uint64_t const uValidCr0 = (uHostCr0 & ~fCr0IgnMask) | (uGuestCr0 & fCr0IgnMask);
2079 CPUMSetGuestCR0(pVCpu, uValidCr0);
2080 }
2081
2082 /* CR4. */
2083 {
2084 /* CR4 MB1 bits are not modified. */
2085 uint64_t const fCr4IgnMask = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
2086 uint64_t const uHostCr4 = pVmcs->u64HostCr4.u;
2087 uint64_t const uGuestCr4 = pVCpu->cpum.GstCtx.cr4;
2088 uint64_t uValidCr4 = (uHostCr4 & ~fCr4IgnMask) | (uGuestCr4 & fCr4IgnMask);
2089 if (fHostInLongMode)
2090 uValidCr4 |= X86_CR4_PAE;
2091 else
2092 uValidCr4 &= ~X86_CR4_PCIDE;
2093 CPUMSetGuestCR4(pVCpu, uValidCr4);
2094 }
2095
2096 /* CR3 (host value validated while checking host-state during VM-entry). */
2097 pVCpu->cpum.GstCtx.cr3 = pVmcs->u64HostCr3.u;
2098
2099 /* DR7. */
2100 pVCpu->cpum.GstCtx.dr[7] = X86_DR7_INIT_VAL;
2101
2102 /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
2103
2104 /* Save SYSENTER CS, ESP, EIP (host value validated while checking host-state during VM-entry). */
2105 pVCpu->cpum.GstCtx.SysEnter.eip = pVmcs->u64HostSysenterEip.u;
2106 pVCpu->cpum.GstCtx.SysEnter.esp = pVmcs->u64HostSysenterEsp.u;
2107 pVCpu->cpum.GstCtx.SysEnter.cs = pVmcs->u32HostSysenterCs;
2108
2109 /* FS, GS bases are loaded later while we load host segment registers. */
2110
2111 /* EFER MSR (host value validated while checking host-state during VM-entry). */
2112 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
2113 pVCpu->cpum.GstCtx.msrEFER = pVmcs->u64HostEferMsr.u;
2114 else if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2115 {
2116 if (fHostInLongMode)
2117 pVCpu->cpum.GstCtx.msrEFER |= (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
2118 else
2119 pVCpu->cpum.GstCtx.msrEFER &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
2120 }
2121
2122 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
2123
2124 /* PAT MSR (host value is validated while checking host-state during VM-entry). */
2125 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
2126 pVCpu->cpum.GstCtx.msrPAT = pVmcs->u64HostPatMsr.u;
2127
2128 /* We don't support IA32_BNDCFGS MSR yet. */
2129}
2130
2131
2132/**
2133 * Loads host segment registers, GDTR, IDTR, LDTR and TR as part of VM-exit.
2134 *
2135 * @param pVCpu The cross context virtual CPU structure.
2136 */
2137IEM_STATIC void iemVmxVmexitLoadHostSegRegs(PVMCPU pVCpu)
2138{
2139 /*
2140 * Load host segment registers, GDTR, IDTR, LDTR and TR.
2141 * See Intel spec. 27.5.2 "Loading Host Segment and Descriptor-Table Registers".
2142 *
2143 * Warning! Be careful to not touch fields that are reserved by VT-x,
2144 * e.g. segment limit high bits stored in segment attributes (in bits 11:8).
2145 */
2146 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2147 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2148
2149 /* CS, SS, ES, DS, FS, GS. */
2150 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
2151 {
2152 RTSEL const HostSel = iemVmxVmcsGetHostSelReg(pVmcs, iSegReg);
2153 bool const fUnusable = RT_BOOL(HostSel == 0);
2154
2155 /* Selector. */
2156 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel = HostSel;
2157 pVCpu->cpum.GstCtx.aSRegs[iSegReg].ValidSel = HostSel;
2158 pVCpu->cpum.GstCtx.aSRegs[iSegReg].fFlags = CPUMSELREG_FLAGS_VALID;
2159
2160 /* Limit. */
2161 pVCpu->cpum.GstCtx.aSRegs[iSegReg].u32Limit = 0xffffffff;
2162
2163 /* Base and Attributes. */
2164 switch (iSegReg)
2165 {
2166 case X86_SREG_CS:
2167 {
2168 pVCpu->cpum.GstCtx.cs.u64Base = 0;
2169 pVCpu->cpum.GstCtx.cs.Attr.n.u4Type = X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED;
2170 pVCpu->cpum.GstCtx.cs.Attr.n.u1DescType = 1;
2171 pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl = 0;
2172 pVCpu->cpum.GstCtx.cs.Attr.n.u1Present = 1;
2173 pVCpu->cpum.GstCtx.cs.Attr.n.u1Long = fHostInLongMode;
2174 pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig = !fHostInLongMode;
2175 pVCpu->cpum.GstCtx.cs.Attr.n.u1Granularity = 1;
2176 Assert(!pVCpu->cpum.GstCtx.cs.Attr.n.u1Unusable);
2177 Assert(!fUnusable);
2178 break;
2179 }
2180
2181 case X86_SREG_SS:
2182 case X86_SREG_ES:
2183 case X86_SREG_DS:
2184 {
2185 pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base = 0;
2186 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
2187 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1DescType = 1;
2188 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u2Dpl = 0;
2189 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Present = 1;
2190 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1DefBig = 1;
2191 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Granularity = 1;
2192 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Unusable = fUnusable;
2193 break;
2194 }
2195
2196 case X86_SREG_FS:
2197 {
2198 Assert(X86_IS_CANONICAL(pVmcs->u64HostFsBase.u));
2199 pVCpu->cpum.GstCtx.fs.u64Base = !fUnusable ? pVmcs->u64HostFsBase.u : 0;
2200 pVCpu->cpum.GstCtx.fs.Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
2201 pVCpu->cpum.GstCtx.fs.Attr.n.u1DescType = 1;
2202 pVCpu->cpum.GstCtx.fs.Attr.n.u2Dpl = 0;
2203 pVCpu->cpum.GstCtx.fs.Attr.n.u1Present = 1;
2204 pVCpu->cpum.GstCtx.fs.Attr.n.u1DefBig = 1;
2205 pVCpu->cpum.GstCtx.fs.Attr.n.u1Granularity = 1;
2206 pVCpu->cpum.GstCtx.fs.Attr.n.u1Unusable = fUnusable;
2207 break;
2208 }
2209
2210 case X86_SREG_GS:
2211 {
2212 Assert(X86_IS_CANONICAL(pVmcs->u64HostGsBase.u));
2213 pVCpu->cpum.GstCtx.gs.u64Base = !fUnusable ? pVmcs->u64HostGsBase.u : 0;
2214 pVCpu->cpum.GstCtx.gs.Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
2215 pVCpu->cpum.GstCtx.gs.Attr.n.u1DescType = 1;
2216 pVCpu->cpum.GstCtx.gs.Attr.n.u2Dpl = 0;
2217 pVCpu->cpum.GstCtx.gs.Attr.n.u1Present = 1;
2218 pVCpu->cpum.GstCtx.gs.Attr.n.u1DefBig = 1;
2219 pVCpu->cpum.GstCtx.gs.Attr.n.u1Granularity = 1;
2220 pVCpu->cpum.GstCtx.gs.Attr.n.u1Unusable = fUnusable;
2221 break;
2222 }
2223 }
2224 }
2225
2226 /* TR. */
2227 Assert(X86_IS_CANONICAL(pVmcs->u64HostTrBase.u));
2228 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1Unusable);
2229 pVCpu->cpum.GstCtx.tr.Sel = pVmcs->HostTr;
2230 pVCpu->cpum.GstCtx.tr.ValidSel = pVmcs->HostTr;
2231 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2232 pVCpu->cpum.GstCtx.tr.u32Limit = X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN;
2233 pVCpu->cpum.GstCtx.tr.u64Base = pVmcs->u64HostTrBase.u;
2234 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2235 pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType = 0;
2236 pVCpu->cpum.GstCtx.tr.Attr.n.u2Dpl = 0;
2237 pVCpu->cpum.GstCtx.tr.Attr.n.u1Present = 1;
2238 pVCpu->cpum.GstCtx.tr.Attr.n.u1DefBig = 0;
2239 pVCpu->cpum.GstCtx.tr.Attr.n.u1Granularity = 0;
2240
2241 /* LDTR. */
2242 pVCpu->cpum.GstCtx.ldtr.Sel = 0;
2243 pVCpu->cpum.GstCtx.ldtr.ValidSel = 0;
2244 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2245 pVCpu->cpum.GstCtx.ldtr.u32Limit = 0;
2246 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
2247 pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Unusable = 1;
2248
2249 /* GDTR. */
2250 Assert(X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u));
2251 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcs->u64HostGdtrBase.u;
2252 pVCpu->cpum.GstCtx.gdtr.cbGdt = 0xffff;
2253
2254 /* IDTR.*/
2255 Assert(X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u));
2256 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcs->u64HostIdtrBase.u;
2257 pVCpu->cpum.GstCtx.idtr.cbIdt = 0xffff;
2258}
2259
2260
2261/**
2262 * Checks host PDPTes as part of VM-exit.
2263 *
2264 * @param pVCpu The cross context virtual CPU structure.
2265 * @param uExitReason The VM-exit reason (for logging purposes).
2266 */
2267IEM_STATIC int iemVmxVmexitCheckHostPdptes(PVMCPU pVCpu, uint32_t uExitReason)
2268{
2269 /*
2270 * Check host PDPTEs.
2271 * See Intel spec. 27.5.4 "Checking and Loading Host Page-Directory-Pointer-Table Entries".
2272 */
2273 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2274 const char *const pszFailure = "VMX-abort";
2275 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2276
2277 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
2278 && !fHostInLongMode)
2279 {
2280 uint64_t const uHostCr3 = pVCpu->cpum.GstCtx.cr3 & X86_CR3_PAE_PAGE_MASK;
2281 X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
2282 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uHostCr3, sizeof(aPdptes));
2283 if (RT_SUCCESS(rc))
2284 {
2285 for (unsigned iPdpte = 0; iPdpte < RT_ELEMENTS(aPdptes); iPdpte++)
2286 {
2287 if ( !(aPdptes[iPdpte].u & X86_PDPE_P)
2288 || !(aPdptes[iPdpte].u & X86_PDPE_PAE_MBZ_MASK))
2289 { /* likely */ }
2290 else
2291 {
2292 VMXVDIAG const enmDiag = iemVmxGetDiagVmexitPdpteRsvd(iPdpte);
2293 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
2294 }
2295 }
2296 }
2297 else
2298 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_HostPdpteCr3ReadPhys);
2299 }
2300
2301 NOREF(pszFailure);
2302 NOREF(uExitReason);
2303 return VINF_SUCCESS;
2304}
2305
2306
2307/**
2308 * Loads the host MSRs from the VM-exit auto-load MSRs area as part of VM-exit.
2309 *
2310 * @returns VBox status code.
2311 * @param pVCpu The cross context virtual CPU structure.
2312 * @param uExitReason The VM-exit reason (for logging purposes).
2313 */
2314IEM_STATIC int iemVmxVmexitLoadHostAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason)
2315{
2316 /*
2317 * Load host MSRs.
2318 * See Intel spec. 27.6 "Loading MSRs".
2319 */
2320 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2321 const char *const pszFailure = "VMX-abort";
2322
2323 /*
2324 * The VM-exit MSR-load area address need not be a valid guest-physical address if the
2325 * VM-exit MSR load count is 0. If this is the case, bail early without reading it.
2326 * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
2327 */
2328 uint32_t const cMsrs = pVmcs->u32ExitMsrLoadCount;
2329 if (!cMsrs)
2330 return VINF_SUCCESS;
2331
2332 /*
2333 * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count
2334 * is exceeded including possibly raising #MC exceptions during VMX transition. Our
2335 * implementation causes a VMX-abort followed by a triple-fault.
2336 */
2337 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
2338 if (fIsMsrCountValid)
2339 { /* likely */ }
2340 else
2341 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadCount);
2342
2343 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrLoad.u;
2344 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
2345 GCPhysAutoMsrArea, VMX_V_AUTOMSR_AREA_SIZE);
2346 if (RT_SUCCESS(rc))
2347 {
2348 PCVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
2349 Assert(pMsr);
2350 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
2351 {
2352 if ( !pMsr->u32Reserved
2353 && pMsr->u32Msr != MSR_K8_FS_BASE
2354 && pMsr->u32Msr != MSR_K8_GS_BASE
2355 && pMsr->u32Msr != MSR_K6_EFER
2356 && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
2357 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
2358 {
2359 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
2360 if (rcStrict == VINF_SUCCESS)
2361 continue;
2362
2363 /*
2364 * If we're in ring-0, we cannot handle returns to ring-3 at this point and still continue the VM-exit.
2365 * If any guest hypervisor loads MSRs that require ring-3 handling, we cause a VMX-abort,
2366 * recording the MSR index in the auxiliary info. field and indicating it further with our own,
2367 * specific diagnostic code. Later, we can try to implement handling of the MSR in ring-0
2368 * if possible, or come up with a better, generic solution.
2369 */
2370 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
2371 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
2372 ? kVmxVDiag_Vmexit_MsrLoadRing3
2373 : kVmxVDiag_Vmexit_MsrLoad;
2374 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
2375 }
2376 else
2377 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadRsvd);
2378 }
2379 }
2380 else
2381 {
2382 AssertMsgFailed(("VM-exit: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
2383 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadPtrReadPhys);
2384 }
2385
2386 NOREF(uExitReason);
2387 NOREF(pszFailure);
2388 return VINF_SUCCESS;
2389}
2390
2391
2392/**
2393 * Loads the host state as part of VM-exit.
2394 *
2395 * @returns Strict VBox status code.
2396 * @param pVCpu The cross context virtual CPU structure.
2397 * @param uExitReason The VM-exit reason (for logging purposes).
2398 */
2399IEM_STATIC VBOXSTRICTRC iemVmxVmexitLoadHostState(PVMCPU pVCpu, uint32_t uExitReason)
2400{
2401 /*
2402 * Load host state.
2403 * See Intel spec. 27.5 "Loading Host State".
2404 */
2405 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2406 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2407
2408 /* We cannot return from a long-mode guest to a host that is not in long mode. */
2409 if ( CPUMIsGuestInLongMode(pVCpu)
2410 && !fHostInLongMode)
2411 {
2412 Log(("VM-exit from long-mode guest to host not in long-mode -> VMX-Abort\n"));
2413 return iemVmxAbort(pVCpu, VMXABORT_HOST_NOT_IN_LONG_MODE);
2414 }
2415
2416 iemVmxVmexitLoadHostControlRegsMsrs(pVCpu);
2417 iemVmxVmexitLoadHostSegRegs(pVCpu);
2418
2419 /*
2420 * Load host RIP, RSP and RFLAGS.
2421 * See Intel spec. 27.5.3 "Loading Host RIP, RSP and RFLAGS"
2422 */
2423 pVCpu->cpum.GstCtx.rip = pVmcs->u64HostRip.u;
2424 pVCpu->cpum.GstCtx.rsp = pVmcs->u64HostRsp.u;
2425 pVCpu->cpum.GstCtx.rflags.u = X86_EFL_1;
2426
2427 /* Clear address range monitoring. */
2428 EMMonitorWaitClear(pVCpu);
2429
2430 /* Perform the VMX transition (PGM updates). */
2431 VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
2432 if (rcStrict == VINF_SUCCESS)
2433 {
2434 /* Check host PDPTEs (only when we've fully switched page tables). */
2435 /** @todo r=ramshankar: I don't know if PGM does this for us already or not... */
2436 int rc = iemVmxVmexitCheckHostPdptes(pVCpu, uExitReason);
2437 if (RT_FAILURE(rc))
2438 {
2439 Log(("VM-exit failed while restoring host PDPTEs -> VMX-Abort\n"));
2440 return iemVmxAbort(pVCpu, VMXABORT_HOST_PDPTE);
2441 }
2442 }
2443 else if (RT_SUCCESS(rcStrict))
2444 {
2445 Log3(("VM-exit: iemVmxWorldSwitch returns %Rrc (uExitReason=%u) -> Setting passup status\n", VBOXSTRICTRC_VAL(rcStrict),
2446 uExitReason));
2447 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2448 }
2449 else
2450 {
2451 Log3(("VM-exit: iemVmxWorldSwitch failed! rc=%Rrc (uExitReason=%u)\n", VBOXSTRICTRC_VAL(rcStrict), uExitReason));
2452 return VBOXSTRICTRC_VAL(rcStrict);
2453 }
2454
2455 Assert(rcStrict == VINF_SUCCESS);
2456
2457 /* Load MSRs from the VM-exit auto-load MSR area. */
2458 int rc = iemVmxVmexitLoadHostAutoMsrs(pVCpu, uExitReason);
2459 if (RT_FAILURE(rc))
2460 {
2461 Log(("VM-exit failed while loading host MSRs -> VMX-Abort\n"));
2462 return iemVmxAbort(pVCpu, VMXABORT_LOAD_HOST_MSR);
2463 }
2464
2465 return rcStrict;
2466}
2467
2468
2469/**
2470 * Gets VM-exit instruction information along with any displacement for an
2471 * instruction VM-exit.
2472 *
2473 * @returns The VM-exit instruction information.
2474 * @param pVCpu The cross context virtual CPU structure.
2475 * @param uExitReason The VM-exit reason.
2476 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_XXX).
2477 * @param pGCPtrDisp Where to store the displacement field. Optional, can be
2478 * NULL.
2479 */
2480IEM_STATIC uint32_t iemVmxGetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, PRTGCPTR pGCPtrDisp)
2481{
2482 RTGCPTR GCPtrDisp;
2483 VMXEXITINSTRINFO ExitInstrInfo;
2484 ExitInstrInfo.u = 0;
2485
2486 /*
2487 * Get and parse the ModR/M byte from our decoded opcodes.
2488 */
2489 uint8_t bRm;
2490 uint8_t const offModRm = pVCpu->iem.s.offModRm;
2491 IEM_MODRM_GET_U8(pVCpu, bRm, offModRm);
2492 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2493 {
2494 /*
2495 * ModR/M indicates register addressing.
2496 *
2497 * The primary/secondary register operands are reported in the iReg1 or iReg2
2498 * fields depending on whether it is a read/write form.
2499 */
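 /* E.g. (illustrative encoding): bRm = 0xC1 (mod=11, reg=000, rm=001) yields RAX and RCX (with any REX bits
    OR'ed in) as the two register operands; which of them lands in iReg1 vs. iReg2 depends on the form below. */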
2500 uint8_t idxReg1;
2501 uint8_t idxReg2;
2502 if (!VMXINSTRID_IS_MODRM_PRIMARY_OP_W(uInstrId))
2503 {
2504 idxReg1 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
2505 idxReg2 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
2506 }
2507 else
2508 {
2509 idxReg1 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
2510 idxReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
2511 }
2512 ExitInstrInfo.All.u2Scaling = 0;
2513 ExitInstrInfo.All.iReg1 = idxReg1;
2514 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
2515 ExitInstrInfo.All.fIsRegOperand = 1;
2516 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
2517 ExitInstrInfo.All.iSegReg = 0;
2518 ExitInstrInfo.All.iIdxReg = 0;
2519 ExitInstrInfo.All.fIdxRegInvalid = 1;
2520 ExitInstrInfo.All.iBaseReg = 0;
2521 ExitInstrInfo.All.fBaseRegInvalid = 1;
2522 ExitInstrInfo.All.iReg2 = idxReg2;
2523
2524 /* Displacement not applicable for register addressing. */
2525 GCPtrDisp = 0;
2526 }
2527 else
2528 {
2529 /*
2530 * ModR/M indicates memory addressing.
2531 */
2532 uint8_t uScale = 0;
2533 bool fBaseRegValid = false;
2534 bool fIdxRegValid = false;
2535 uint8_t iBaseReg = 0;
2536 uint8_t iIdxReg = 0;
2537 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
2538 {
2539 /*
2540 * Parse the ModR/M, displacement for 16-bit addressing mode.
2541 * See Intel instruction spec. Table 2-1. "16-Bit Addressing Forms with the ModR/M Byte".
2542 */
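 /* E.g. (illustrative encoding): bRm = 0x46 (mod=01, rm=110) decodes to a BP base with no index and an
    8-bit sign-extended displacement. */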
2543 uint16_t u16Disp = 0;
2544 uint8_t const offDisp = offModRm + sizeof(bRm);
2545 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
2546 {
2547 /* Displacement without any registers. */
2548 IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);
2549 }
2550 else
2551 {
2552 /* Register (index and base). */
2553 switch (bRm & X86_MODRM_RM_MASK)
2554 {
2555 case 0: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
2556 case 1: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
2557 case 2: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
2558 case 3: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
2559 case 4: fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
2560 case 5: fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
2561 case 6: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; break;
2562 case 7: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; break;
2563 }
2564
2565 /* Register + displacement. */
2566 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
2567 {
2568 case 0: break;
2569 case 1: IEM_DISP_GET_S8_SX_U16(pVCpu, u16Disp, offDisp); break;
2570 case 2: IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp); break;
2571 default:
2572 {
2573 /* Register addressing, handled at the beginning. */
2574 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
2575 break;
2576 }
2577 }
2578 }
2579
2580 Assert(!uScale); /* There's no scaling/SIB byte for 16-bit addressing. */
2581 GCPtrDisp = (int16_t)u16Disp; /* Sign-extend the displacement. */
2582 }
2583 else if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
2584 {
2585 /*
2586 * Parse the ModR/M, SIB, displacement for 32-bit addressing mode.
2587 * See Intel instruction spec. Table 2-2. "32-Bit Addressing Forms with the ModR/M Byte".
2588 */
2589 uint32_t u32Disp = 0;
2590 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
2591 {
2592 /* Displacement without any registers. */
2593 uint8_t const offDisp = offModRm + sizeof(bRm);
2594 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
2595 }
2596 else
2597 {
2598 /* Register (and perhaps scale, index and base). */
2599 uint8_t offDisp = offModRm + sizeof(bRm);
2600 iBaseReg = (bRm & X86_MODRM_RM_MASK);
2601 if (iBaseReg == 4)
2602 {
2603 /* An SIB byte follows the ModR/M byte, parse it. */
2604 uint8_t bSib;
2605 uint8_t const offSib = offModRm + sizeof(bRm);
2606 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
2607
2608 /* A displacement may follow SIB, update its offset. */
2609 offDisp += sizeof(bSib);
2610
2611 /* Get the scale. */
2612 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
2613
2614 /* Get the index register. */
2615 iIdxReg = (bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK;
2616 fIdxRegValid = RT_BOOL(iIdxReg != 4);
2617
2618 /* Get the base register. */
2619 iBaseReg = bSib & X86_SIB_BASE_MASK;
2620 fBaseRegValid = true;
2621 if (iBaseReg == 5)
2622 {
2623 if ((bRm & X86_MODRM_MOD_MASK) == 0)
2624 {
2625 /* Mod is 0 implies a 32-bit displacement with no base. */
2626 fBaseRegValid = false;
2627 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
2628 }
2629 else
2630 {
2631 /* Mod is not 0 implies an 8-bit/32-bit displacement (handled below) with an EBP base. */
2632 iBaseReg = X86_GREG_xBP;
2633 }
2634 }
2635 }
2636
2637 /* Register + displacement. */
2638 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
2639 {
2640 case 0: /* Handled above */ break;
2641 case 1: IEM_DISP_GET_S8_SX_U32(pVCpu, u32Disp, offDisp); break;
2642 case 2: IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp); break;
2643 default:
2644 {
2645 /* Register addressing, handled at the beginning. */
2646 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
2647 break;
2648 }
2649 }
2650 }
2651
2652 GCPtrDisp = (int32_t)u32Disp; /* Sign-extend the displacement. */
2653 }
2654 else
2655 {
2656 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT);
2657
2658 /*
2659 * Parse the ModR/M, SIB, displacement for 64-bit addressing mode.
2660 * See Intel instruction spec. 2.2 "IA-32e Mode".
2661 */
2662 uint64_t u64Disp = 0;
2663 bool const fRipRelativeAddr = RT_BOOL((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5);
2664 if (fRipRelativeAddr)
2665 {
2666 /*
2667 * RIP-relative addressing mode.
2668 *
2669 * The displacement is 32-bit signed implying an offset range of +/-2G.
2670 * See Intel instruction spec. 2.2.1.6 "RIP-Relative Addressing".
2671 */
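 /* E.g. (illustrative encoding): bRm = 0x05 (mod=00, rm=101) in 64-bit mode selects RIP-relative
    addressing with a 32-bit signed displacement. */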
2672 uint8_t const offDisp = offModRm + sizeof(bRm);
2673 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
2674 }
2675 else
2676 {
2677 uint8_t offDisp = offModRm + sizeof(bRm);
2678
2679 /*
2680 * Register (and perhaps scale, index and base).
2681 *
2682 * REX.B extends the most-significant bit of the base register. However, REX.B
2683 * is ignored while determining whether an SIB follows the opcode. Hence, we
2684 * shall OR any REX.B bit -after- inspecting for an SIB byte below.
2685 *
2686 * See Intel instruction spec. Table 2-5. "Special Cases of REX Encodings".
2687 */
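 /* E.g. (illustrative encoding, no REX prefix): bRm = 0x04 (mod=00, rm=100) means an SIB byte follows;
    bSib = 0x8B then decodes to base=RBX, index=RCX with a scale factor of 4 and no displacement. */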
2688 iBaseReg = (bRm & X86_MODRM_RM_MASK);
2689 if (iBaseReg == 4)
2690 {
2691 /* An SIB byte follows the ModR/M byte, parse it. Displacement (if any) follows SIB. */
2692 uint8_t bSib;
2693 uint8_t const offSib = offModRm + sizeof(bRm);
2694 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
2695
2696 /* Displacement may follow SIB, update its offset. */
2697 offDisp += sizeof(bSib);
2698
2699 /* Get the scale. */
2700 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
2701
2702 /* Get the index. */
2703 iIdxReg = ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex;
2704 fIdxRegValid = RT_BOOL(iIdxReg != 4); /* R12 -can- be used as an index register. */
2705
2706 /* Get the base. */
2707 iBaseReg = (bSib & X86_SIB_BASE_MASK);
2708 fBaseRegValid = true;
2709 if (iBaseReg == 5)
2710 {
2711 if ((bRm & X86_MODRM_MOD_MASK) == 0)
2712 {
2713 /* Mod is 0 implies a signed 32-bit displacement with no base. */
2714 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
2715 }
2716 else
2717 {
2718 /* Mod is non-zero implies an 8-bit/32-bit displacement (handled below) with RBP or R13 as base. */
2719 iBaseReg = pVCpu->iem.s.uRexB ? X86_GREG_x13 : X86_GREG_xBP;
2720 }
2721 }
2722 }
2723 iBaseReg |= pVCpu->iem.s.uRexB;
2724
2725 /* Register + displacement. */
2726 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
2727 {
2728 case 0: /* Handled above */ break;
2729 case 1: IEM_DISP_GET_S8_SX_U64(pVCpu, u64Disp, offDisp); break;
2730 case 2: IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp); break;
2731 default:
2732 {
2733 /* Register addressing, handled at the beginning. */
2734 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
2735 break;
2736 }
2737 }
2738 }
2739
2740 GCPtrDisp = fRipRelativeAddr ? pVCpu->cpum.GstCtx.rip + u64Disp : u64Disp;
2741 }
2742
2743 /*
2744 * The primary or secondary register operand is reported in iReg2 depending
2745 * on whether the primary operand is in read/write form.
2746 */
2747 uint8_t idxReg2;
2748 if (!VMXINSTRID_IS_MODRM_PRIMARY_OP_W(uInstrId))
2749 {
2750 idxReg2 = bRm & X86_MODRM_RM_MASK;
2751 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
2752 idxReg2 |= pVCpu->iem.s.uRexB;
2753 }
2754 else
2755 {
2756 idxReg2 = (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK;
2757 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
2758 idxReg2 |= pVCpu->iem.s.uRexReg;
2759 }
2760 ExitInstrInfo.All.u2Scaling = uScale;
2761 ExitInstrInfo.All.iReg1 = 0; /* Not applicable for memory addressing. */
2762 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
2763 ExitInstrInfo.All.fIsRegOperand = 0;
2764 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
2765 ExitInstrInfo.All.iSegReg = pVCpu->iem.s.iEffSeg;
2766 ExitInstrInfo.All.iIdxReg = iIdxReg;
2767 ExitInstrInfo.All.fIdxRegInvalid = !fIdxRegValid;
2768 ExitInstrInfo.All.iBaseReg = iBaseReg;
2769 ExitInstrInfo.All.fBaseRegInvalid = !fBaseRegValid;
2770 ExitInstrInfo.All.iReg2 = idxReg2;
2771 }
2772
2773 /*
2774 * Handle exceptions to the norm for certain instructions.
2775 * (e.g. some instructions convey an instruction identity in place of iReg2).
2776 */
2777 switch (uExitReason)
2778 {
2779 case VMX_EXIT_GDTR_IDTR_ACCESS:
2780 {
2781 Assert(VMXINSTRID_IS_VALID(uInstrId));
2782 Assert(VMXINSTRID_GET_ID(uInstrId) == (uInstrId & 0x3));
2783 ExitInstrInfo.GdtIdt.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
2784 ExitInstrInfo.GdtIdt.u2Undef0 = 0;
2785 break;
2786 }
2787
2788 case VMX_EXIT_LDTR_TR_ACCESS:
2789 {
2790 Assert(VMXINSTRID_IS_VALID(uInstrId));
2791 Assert(VMXINSTRID_GET_ID(uInstrId) == (uInstrId & 0x3));
2792 ExitInstrInfo.LdtTr.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
2793 ExitInstrInfo.LdtTr.u2Undef0 = 0;
2794 break;
2795 }
2796
2797 case VMX_EXIT_RDRAND:
2798 case VMX_EXIT_RDSEED:
2799 {
2800 Assert(ExitInstrInfo.RdrandRdseed.u2OperandSize != 3);
2801 break;
2802 }
2803 }
2804
2805 /* Update displacement and return the constructed VM-exit instruction information field. */
2806 if (pGCPtrDisp)
2807 *pGCPtrDisp = GCPtrDisp;
2808
2809 return ExitInstrInfo.u;
2810}
2811
2812
2813/**
2814 * VMX VM-exit handler.
2815 *
2816 * @returns Strict VBox status code.
2817 * @retval VINF_VMX_VMEXIT when the VM-exit is successful.
2818 * @retval VINF_EM_TRIPLE_FAULT when VM-exit is unsuccessful and leads to a
2819 * triple-fault.
2820 *
2821 * @param pVCpu The cross context virtual CPU structure.
2822 * @param uExitReason The VM-exit reason.
2823 *
2824 * @remarks Make sure VM-exit qualification is updated before calling this
2825 * function!
2826 */
2827IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason)
2828{
2829# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
2830 RT_NOREF2(pVCpu, uExitReason);
2831 return VINF_EM_RAW_EMULATE_INSTR;
2832# else
2833 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
2834
2835 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2836 Assert(pVmcs);
2837
2838 pVmcs->u32RoExitReason = uExitReason;
2839
2840 /** @todo NSTVMX: IEMGetCurrentXcpt will be VM-exit interruption info. */
2841 /** @todo NSTVMX: The source event should be recorded in IDT-vectoring info
2842 * during injection. */
2843
2844 /*
2845 * Save the guest state back into the VMCS.
2846 * We only need to save the state when the VM-entry was successful.
2847 */
2848 bool const fVmentryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason);
2849 if (!fVmentryFailed)
2850 {
2851 iemVmxVmexitSaveGuestState(pVCpu, uExitReason);
2852 int rc = iemVmxVmexitSaveGuestAutoMsrs(pVCpu, uExitReason);
2853 if (RT_SUCCESS(rc))
2854 { /* likely */ }
2855 else
2856 {
2857 IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE(pVCpu, "VMX-Abort");
2858 return iemVmxAbort(pVCpu, VMXABORT_SAVE_GUEST_MSRS);
2859 }
2860 }
2861 else
2862 {
2863 /* Restore force-flags that may or may not have been cleared as part of the failed VM-entry. */
2864 iemVmxVmexitRestoreForceFlags(pVCpu);
2865 }
2866
2867 /*
2868 * The high bits of the VM-exit reason are only relevant when the VM-exit occurs in
2869 * enclave mode/SMM which we don't support yet. If we ever add support for it, we can
2870 * pass just the lower bits, till then an assert should suffice.
2871 */
2872 Assert(!RT_HI_U16(uExitReason));
2873
2874 VBOXSTRICTRC rcStrict = iemVmxVmexitLoadHostState(pVCpu, uExitReason);
2875 if (RT_FAILURE(rcStrict))
2876 LogFunc(("Loading host-state failed. uExitReason=%u rc=%Rrc\n", uExitReason, VBOXSTRICTRC_VAL(rcStrict)));
2877
2878 /* We're no longer in nested-guest execution mode. */
2879 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = false;
2880
2881 Assert(rcStrict == VINF_SUCCESS);
2882 IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE(pVCpu, "VM-exit");
2883 return VINF_VMX_VMEXIT;
2884# endif
2885}
2886
2887
2888/**
2889 * VMX VM-exit handler for VM-exits due to instruction execution.
2890 *
2891 * This is intended for instructions where the caller provides all the relevant
2892 * VM-exit information.
2893 *
2894 * @returns Strict VBox status code.
2895 * @param pVCpu The cross context virtual CPU structure.
2896 * @param pExitInfo Pointer to the VM-exit instruction information struct.
2897 */
2898DECLINLINE(VBOXSTRICTRC) iemVmxVmexitInstrWithInfo(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
2899{
2900 /*
2901 * For instructions where any of the following fields are not applicable:
2902 * - VM-exit instruction info. is undefined.
2903 * - VM-exit qualification must be cleared.
2904 * - VM-exit guest-linear address is undefined.
2905 * - VM-exit guest-physical address is undefined.
2906 *
2907 * The VM-exit instruction length is mandatory for all VM-exits that are caused by
2908 * instruction execution. For VM-exits that are not due to instruction execution this
2909 * field is undefined.
2910 *
2911 * In our implementation in IEM, all undefined fields are generally cleared. However,
2912 * if the caller supplies information (from say the physical CPU directly) it is
2913 * then possible that the undefined fields are not cleared.
2914 *
2915 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
2916 * See Intel spec. 27.2.4 "Information for VM Exits Due to Instruction Execution".
2917 */
2918 Assert(pExitInfo);
2919 AssertMsg(pExitInfo->uReason <= VMX_EXIT_MAX, ("uReason=%u\n", pExitInfo->uReason));
2920 AssertMsg(pExitInfo->cbInstr >= 1 && pExitInfo->cbInstr <= 15,
2921 ("uReason=%u cbInstr=%u\n", pExitInfo->uReason, pExitInfo->cbInstr));
2922
2923 /* Update all the relevant fields from the VM-exit instruction information struct. */
2924 iemVmxVmcsSetExitInstrInfo(pVCpu, pExitInfo->InstrInfo.u);
2925 iemVmxVmcsSetExitQual(pVCpu, pExitInfo->u64Qual);
2926 iemVmxVmcsSetExitGuestLinearAddr(pVCpu, pExitInfo->u64GuestLinearAddr);
2927 iemVmxVmcsSetExitGuestPhysAddr(pVCpu, pExitInfo->u64GuestPhysAddr);
2928 iemVmxVmcsSetExitInstrLen(pVCpu, pExitInfo->cbInstr);
2929
2930 /* Perform the VM-exit. */
2931 return iemVmxVmexit(pVCpu, pExitInfo->uReason);
2932}
2933
2934
2935/**
2936 * VMX VM-exit handler for VM-exits due to instruction execution.
2937 *
2938 * This is intended for instructions that only provide the VM-exit instruction
2939 * length.
2940 *
2941 * @param pVCpu The cross context virtual CPU structure.
2942 * @param uExitReason The VM-exit reason.
2943 * @param cbInstr The instruction length in bytes.
2944 */
2945IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstr(PVMCPU pVCpu, uint32_t uExitReason, uint8_t cbInstr)
2946{
2947 VMXVEXITINFO ExitInfo;
2948 RT_ZERO(ExitInfo);
2949 ExitInfo.uReason = uExitReason;
2950 ExitInfo.cbInstr = cbInstr;
2951
2952#ifdef VBOX_STRICT
2953 /* To prevent us from shooting ourselves in the foot. Maybe remove later. */
2954 switch (uExitReason)
2955 {
2956 case VMX_EXIT_INVEPT:
2957 case VMX_EXIT_INVPCID:
2958 case VMX_EXIT_LDTR_TR_ACCESS:
2959 case VMX_EXIT_GDTR_IDTR_ACCESS:
2960 case VMX_EXIT_VMCLEAR:
2961 case VMX_EXIT_VMPTRLD:
2962 case VMX_EXIT_VMPTRST:
2963 case VMX_EXIT_VMREAD:
2964 case VMX_EXIT_VMWRITE:
2965 case VMX_EXIT_VMXON:
2966 case VMX_EXIT_XRSTORS:
2967 case VMX_EXIT_XSAVES:
2968 case VMX_EXIT_RDRAND:
2969 case VMX_EXIT_RDSEED:
2970 case VMX_EXIT_IO_INSTR:
2971 AssertMsgFailedReturn(("Use iemVmxVmexitInstrNeedsInfo for uExitReason=%u\n", uExitReason), VERR_IEM_IPE_5);
2972 break;
2973 }
2974#endif
2975
2976 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2977}
2978
2979
2980/**
2981 * VMX VM-exit handler for VM-exits due to instruction execution.
2982 *
2983 * This is intended for instructions that have a ModR/M byte and update the VM-exit
2984 * instruction information and VM-exit qualification fields.
2985 *
2986 * @param pVCpu The cross context virtual CPU structure.
2987 * @param uExitReason The VM-exit reason.
2988 * @param uInstrId The instruction identity (VMXINSTRID_XXX).
2989 * @param cbInstr The instruction length in bytes.
2990 *
2991 * @remarks Do not use this for INS/OUTS instructions.
2992 */
2993IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrNeedsInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, uint8_t cbInstr)
2994{
2995 VMXVEXITINFO ExitInfo;
2996 RT_ZERO(ExitInfo);
2997 ExitInfo.uReason = uExitReason;
2998 ExitInfo.cbInstr = cbInstr;
2999
3000 /*
3001 * Update the VM-exit qualification field with displacement bytes.
3002 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
3003 */
3004 switch (uExitReason)
3005 {
3006 case VMX_EXIT_INVEPT:
3007 case VMX_EXIT_INVPCID:
3008 case VMX_EXIT_LDTR_TR_ACCESS:
3009 case VMX_EXIT_GDTR_IDTR_ACCESS:
3010 case VMX_EXIT_VMCLEAR:
3011 case VMX_EXIT_VMPTRLD:
3012 case VMX_EXIT_VMPTRST:
3013 case VMX_EXIT_VMREAD:
3014 case VMX_EXIT_VMWRITE:
3015 case VMX_EXIT_VMXON:
3016 case VMX_EXIT_XRSTORS:
3017 case VMX_EXIT_XSAVES:
3018 case VMX_EXIT_RDRAND:
3019 case VMX_EXIT_RDSEED:
3020 {
3021 /* Construct the VM-exit instruction information. */
3022 RTGCPTR GCPtrDisp;
3023 uint32_t const uInstrInfo = iemVmxGetExitInstrInfo(pVCpu, uExitReason, uInstrId, &GCPtrDisp);
3024
3025 /* Update the VM-exit instruction information. */
3026 ExitInfo.InstrInfo.u = uInstrInfo;
3027
3028 /* Update the VM-exit qualification. */
3029 ExitInfo.u64Qual = GCPtrDisp;
3030 break;
3031 }
3032
3033 default:
3034 AssertMsgFailedReturn(("Use instruction-specific handler\n"), VERR_IEM_IPE_5);
3035 break;
3036 }
3037
3038 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3039}
3040
3041
3042/**
3043 * Checks whether an I/O instruction for the given port is intercepted (causes a
3044 * VM-exit) or not.
3045 *
3046 * @returns @c true if the instruction is intercepted, @c false otherwise.
3047 * @param pVCpu The cross context virtual CPU structure.
3048 * @param u16Port The I/O port being accessed by the instruction.
3049 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
3050 */
3051IEM_STATIC bool iemVmxIsIoInterceptSet(PVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess)
3052{
3053 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3054 Assert(pVmcs);
3055
3056 /*
3057 * Check whether the I/O instruction must cause a VM-exit or not.
3058 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3059 */
3060 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_UNCOND_IO_EXIT)
3061 return true;
3062
3063 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
3064 {
3065 uint8_t const *pbIoBitmapA = (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvIoBitmap);
3066 uint8_t const *pbIoBitmapB = (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvIoBitmap) + VMX_V_IO_BITMAP_A_SIZE;
3067 Assert(pbIoBitmapA);
3068 Assert(pbIoBitmapB);
3069 return HMVmxGetIoBitmapPermission(pbIoBitmapA, pbIoBitmapB, u16Port, cbAccess);
3070 }
3071
3072 return false;
3073}
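
/*
 * A minimal standalone sketch (not used by the code above) of how the two VMX I/O
 * bitmaps are consulted per the Intel spec: bitmap A covers ports 0x0000-0x7fff and
 * bitmap B covers ports 0x8000-0xffff, one bit per port, and a multi-byte access is
 * intercepted if the bit of any port it touches is set. The helper name below is
 * hypothetical; the code above relies on HMVmxGetIoBitmapPermission() for this.
 */
#if 0 /* illustrative only */
static bool iemVmxSketchIoBitmapPermission(uint8_t const *pbBitmapA, uint8_t const *pbBitmapB,
                                           uint16_t u16Port, uint8_t cbAccess)
{
    for (uint8_t i = 0; i < cbAccess; i++)      /* Port-space wrap-around ignored for brevity. */
    {
        uint16_t const uPort    = (uint16_t)(u16Port + i);
        uint8_t const *pbBitmap = uPort < 0x8000 ? pbBitmapA : pbBitmapB;
        uint16_t const idxBit   = uPort & 0x7fff;            /* Bit index within the selected bitmap. */
        if (pbBitmap[idxBit / 8] & (1 << (idxBit % 8)))
            return true;                                     /* An accessed port is intercepted -> VM-exit. */
    }
    return false;
}
#endif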
3074
3075
3076/**
3077 * VMX VM-exit handler for VM-exits due to Monitor-Trap Flag (MTF).
3078 *
3079 * @returns Strict VBox status code.
3080 * @param pVCpu The cross context virtual CPU structure.
3081 */
3082IEM_STATIC VBOXSTRICTRC iemVmxVmexitMtf(PVMCPU pVCpu)
3083{
3084 /*
3085 * The MTF VM-exit can occur even when the MTF VM-execution control is
3086 * not set (e.g. when VM-entry injects an MTF pending event), so do not
3087 * check for it here.
3088 */
3089
3090 /* Clear the force-flag, indicating that the monitor-trap flag is no longer active. */
3091 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER);
3092
3093 /* Cause the MTF VM-exit. The VM-exit qualification is MBZ. */
3094 iemVmxVmcsSetExitQual(pVCpu, 0);
3095 return iemVmxVmexit(pVCpu, VMX_EXIT_MTF);
3096}
3097
3098
3099/**
3100 * VMX VM-exit handler for VM-exits due to INVLPG.
3101 *
3102 * @returns Strict VBox status code.
3103 * @param pVCpu The cross context virtual CPU structure.
3104 * @param GCPtrPage The guest-linear address of the page being invalidated.
3105 * @param cbInstr The instruction length in bytes.
3106 */
3107IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrInvlpg(PVMCPU pVCpu, RTGCPTR GCPtrPage, uint8_t cbInstr)
3108{
3109 VMXVEXITINFO ExitInfo;
3110 RT_ZERO(ExitInfo);
3111 ExitInfo.uReason = VMX_EXIT_INVLPG;
3112 ExitInfo.cbInstr = cbInstr;
3113 ExitInfo.u64Qual = GCPtrPage;
3114 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode || !RT_HI_U32(ExitInfo.u64Qual));
3115
3116 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3117}
3118
3119
3120/**
3121 * VMX VM-exit handler for VM-exits due to LMSW.
3122 *
3123 * @returns Strict VBox status code.
3124 * @param pVCpu The cross context virtual CPU structure.
3125 * @param uGuestCr0 The current guest CR0.
3126 * @param pu16NewMsw The machine-status word specified in LMSW's source
3127 * operand. This will be updated depending on the VMX
3128 * guest/host CR0 mask if LMSW is not intercepted.
3129 * @param GCPtrEffDst The guest-linear address of the source operand in case
3130 * of a memory operand. For register operand, pass
3131 * NIL_RTGCPTR.
3132 * @param cbInstr The instruction length in bytes.
3133 */
3134IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrLmsw(PVMCPU pVCpu, uint32_t uGuestCr0, uint16_t *pu16NewMsw, RTGCPTR GCPtrEffDst,
3135 uint8_t cbInstr)
3136{
3137 /*
3138 * LMSW VM-exits are subject to the CR0 guest/host mask and the CR0 read shadow.
3139 *
3140 * See Intel spec. 24.6.6 "Guest/Host Masks and Read Shadows for CR0 and CR4".
3141 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3142 */
3143 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3144 Assert(pVmcs);
3145 Assert(pu16NewMsw);
3146
3147 bool fIntercept = false;
3148 uint32_t const fGstHostMask = pVmcs->u64Cr0Mask.u;
3149 uint32_t const fReadShadow = pVmcs->u64Cr0ReadShadow.u;
3150
3151 /*
3152 * LMSW can never clear CR0.PE but it may set it. Hence, we handle the
3153 * CR0.PE case first, before the rest of the bits in the MSW.
3154 *
3155 * If CR0.PE is owned by the host and CR0.PE differs between the
3156 * MSW (source operand) and the read-shadow, we must cause a VM-exit.
3157 */
3158 if ( (fGstHostMask & X86_CR0_PE)
3159 && (*pu16NewMsw & X86_CR0_PE)
3160 && !(fReadShadow & X86_CR0_PE))
3161 fIntercept = true;
3162
3163 /*
3164 * If CR0.MP, CR0.EM or CR0.TS is owned by the host, and the corresponding
3165 * bits differ between the MSW (source operand) and the read-shadow, we must
3166 * cause a VM-exit.
3167 */
3168 uint32_t fGstHostLmswMask = fGstHostMask & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3169 if ((fReadShadow & fGstHostLmswMask) != (*pu16NewMsw & fGstHostLmswMask))
3170 fIntercept = true;
3171
3172 if (fIntercept)
3173 {
3174 Log2(("lmsw: Guest intercept -> VM-exit\n"));
3175
3176 VMXVEXITINFO ExitInfo;
3177 RT_ZERO(ExitInfo);
3178 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3179 ExitInfo.cbInstr = cbInstr;
3180
3181 bool const fMemOperand = RT_BOOL(GCPtrEffDst != NIL_RTGCPTR);
3182 if (fMemOperand)
3183 {
3184 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode || !RT_HI_U32(GCPtrEffDst));
3185 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
3186 }
3187
3188 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 0) /* CR0 */
3189 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_LMSW)
3190 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_LMSW_OP, fMemOperand)
3191 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_LMSW_DATA, *pu16NewMsw);
3192
3193 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3194 }
3195
3196 /*
3197 * If LMSW did not cause a VM-exit, any CR0 bits in the range 0:3 that are set in the
3198 * CR0 guest/host mask must be left unmodified.
3199 *
3200 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
3201 */
3202 fGstHostLmswMask = fGstHostMask & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3203 *pu16NewMsw = (uGuestCr0 & fGstHostLmswMask) | (*pu16NewMsw & ~fGstHostLmswMask);
3204
3205 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3206}
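
/*
 * Worked example for the LMSW intercept check above (all values hypothetical):
 * CR0 guest/host mask = 0x0000000c (host owns CR0.EM and CR0.TS),
 * CR0 read shadow = 0x00000000, new MSW = 0x00000008 (tries to set CR0.TS).
 * CR0.TS is host-owned and differs between the MSW and the read shadow, so
 * fIntercept = true and a MOV-CRx VM-exit with the LMSW access type is raised.
 * Had no host-owned bit differed, the host-owned low bits would have been left
 * untouched instead: *pu16NewMsw = (uGuestCr0 & 0xc) | (*pu16NewMsw & ~0xc).
 */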
3207
3208
3209/**
3210 * VMX VM-exit handler for VM-exits due to CLTS.
3211 *
3212 * @returns Strict VBox status code.
3213 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the CLTS instruction did not cause a
3214 * VM-exit but must not modify the guest CR0.TS bit.
3215 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the CLTS instruction did not cause a
3216 * VM-exit and modification to the guest CR0.TS bit is allowed (subject to
3217 * CR0 fixed bits in VMX operation).
3218 * @param pVCpu The cross context virtual CPU structure.
3219 * @param cbInstr The instruction length in bytes.
3220 */
3221IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrClts(PVMCPU pVCpu, uint8_t cbInstr)
3222{
3223 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3224 Assert(pVmcs);
3225
3226 uint32_t const fGstHostMask = pVmcs->u64Cr0Mask.u;
3227 uint32_t const fReadShadow = pVmcs->u64Cr0ReadShadow.u;
3228
3229 /*
3230 * If CR0.TS is owned by the host:
3231 * - If CR0.TS is set in the read-shadow, we must cause a VM-exit.
3232 * - If CR0.TS is cleared in the read-shadow, no VM-exit is caused and the
3233 * CLTS instruction completes without clearing CR0.TS.
3234 *
3235 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3236 */
3237 if (fGstHostMask & X86_CR0_TS)
3238 {
3239 if (fReadShadow & X86_CR0_TS)
3240 {
3241 Log2(("clts: Guest intercept -> VM-exit\n"));
3242
3243 VMXVEXITINFO ExitInfo;
3244 RT_ZERO(ExitInfo);
3245 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3246 ExitInfo.cbInstr = cbInstr;
3247
3248 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 0) /* CR0 */
3249 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_CLTS);
3250 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3251 }
3252
3253 return VINF_VMX_MODIFIES_BEHAVIOR;
3254 }
3255
3256 /*
3257 * If CR0.TS is not owned by the host, the CLTS instruction operates normally
3258 * and may modify CR0.TS (subject to CR0 fixed bits in VMX operation).
3259 */
3260 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3261}
3262
3263
3264/**
3265 * VMX VM-exit handler for VM-exits due to 'Mov CR0,GReg' and 'Mov CR4,GReg'
3266 * (CR0/CR4 write).
3267 *
3268 * @returns Strict VBox status code.
3269 * @param pVCpu The cross context virtual CPU structure.
3270 * @param iCrReg The control register (either CR0 or CR4).
3272 * @param puNewCrX Pointer to the new CR0/CR4 value. Will be updated
3273 * if no VM-exit is caused.
3274 * @param iGReg The general register from which the CR0/CR4 value is
3275 * being loaded.
3276 * @param cbInstr The instruction length in bytes.
3277 */
3278IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr0Cr4(PVMCPU pVCpu, uint8_t iCrReg, uint64_t *puNewCrX, uint8_t iGReg,
3279 uint8_t cbInstr)
3280{
3281 Assert(puNewCrX);
3282 Assert(iCrReg == 0 || iCrReg == 4);
3283
3284 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3285 Assert(pVmcs);
3286
3287 uint64_t uGuestCrX;
3288 uint64_t fGstHostMask;
3289 uint64_t fReadShadow;
3290 if (iCrReg == 0)
3291 {
3292 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
3293 uGuestCrX = pVCpu->cpum.GstCtx.cr0;
3294 fGstHostMask = pVmcs->u64Cr0Mask.u;
3295 fReadShadow = pVmcs->u64Cr0ReadShadow.u;
3296 }
3297 else
3298 {
3299 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
3300 uGuestCrX = pVCpu->cpum.GstCtx.cr4;
3301 fGstHostMask = pVmcs->u64Cr4Mask.u;
3302 fReadShadow = pVmcs->u64Cr4ReadShadow.u;
3303 }
3304
3305 /*
3306 * For any CR0/CR4 bit owned by the host (in the CR0/CR4 guest/host mask), if the
3307 * corresponding bits differ between the source operand and the read-shadow,
3308 * we must cause a VM-exit.
3309 *
3310 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3311 */
3312 if ((fReadShadow & fGstHostMask) != (*puNewCrX & fGstHostMask))
3313 {
3314 Log2(("mov_Cr_Rd: (CR%u) Guest intercept -> VM-exit\n", iCrReg));
3315
3316 VMXVEXITINFO ExitInfo;
3317 RT_ZERO(ExitInfo);
3318 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3319 ExitInfo.cbInstr = cbInstr;
3320
3321 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, iCrReg)
3322 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
3323 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3324 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3325 }
3326
3327 /*
3328 * If the Mov-to-CR0/CR4 did not cause a VM-exit, any bits owned by the host
3329 * must not be modified by the instruction.
3330 *
3331 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
3332 */
3333 *puNewCrX = (uGuestCrX & fGstHostMask) | (*puNewCrX & ~fGstHostMask);
3334
3335 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3336}
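
/*
 * The CR0/CR4 write intercept above boils down to a single predicate: a VM-exit is
 * required iff a host-owned bit (i.e. one set in the guest/host mask) differs
 * between the value being loaded and the read shadow. A standalone restatement
 * of that predicate (the helper name is hypothetical, not part of the IEM code):
 */
#if 0 /* illustrative only */
static bool iemVmxSketchIsCrWriteIntercepted(uint64_t fGstHostMask, uint64_t fReadShadow, uint64_t uNewCrX)
{
    /* Bits not owned by the host can never force an intercept, so compare only masked bits. */
    return ((uNewCrX ^ fReadShadow) & fGstHostMask) != 0;
}
#endif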
3337
3338
3339/**
3340 * VMX VM-exit handler for VM-exits due to 'Mov GReg,CR3' (CR3 read).
3341 *
3342 * @returns VBox strict status code.
3343 * @param pVCpu The cross context virtual CPU structure.
3344 * @param iGReg The general register to which the CR3 value is being stored.
3345 * @param cbInstr The instruction length in bytes.
3346 */
3347IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovFromCr3(PVMCPU pVCpu, uint8_t iGReg, uint8_t cbInstr)
3348{
3349 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3350 Assert(pVmcs);
3351 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
3352
3353 /*
3354 * If the CR3-store exiting control is set, we must cause a VM-exit.
3355 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3356 */
3357 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR3_STORE_EXIT)
3358 {
3359 Log2(("mov_Rd_Cr: (CR3) Guest intercept -> VM-exit\n"));
3360
3361 VMXVEXITINFO ExitInfo;
3362 RT_ZERO(ExitInfo);
3363 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3364 ExitInfo.cbInstr = cbInstr;
3365
3366 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 3) /* CR3 */
3367 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_READ)
3368 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3369 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3370 }
3371
3372 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3373}
3374
3375
3376/**
3377 * VMX VM-exit handler for VM-exits due to 'Mov CR3,GReg' (CR3 write).
3378 *
3379 * @returns VBox strict status code.
3380 * @param pVCpu The cross context virtual CPU structure.
3381 * @param uNewCr3 The new CR3 value.
3382 * @param iGReg The general register from which the CR3 value is being
3383 * loaded.
3384 * @param cbInstr The instruction length in bytes.
3385 */
3386IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr3(PVMCPU pVCpu, uint64_t uNewCr3, uint8_t iGReg, uint8_t cbInstr)
3387{
3388 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3389 Assert(pVmcs);
3390
3391 /*
3392 * If the CR3-load exiting control is set and the new CR3 value does not
3393 * match any of the CR3-target values in the VMCS, we must cause a VM-exit.
3394 *
3395 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3396 */
3397 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR3_LOAD_EXIT)
3398 {
3399 uint32_t const uCr3TargetCount = pVmcs->u32Cr3TargetCount;
3400 Assert(uCr3TargetCount <= VMX_V_CR3_TARGET_COUNT);
3401
3402 /* No VM-exit if the new CR3 matches one of the CR3-target values; none can match when the count is 0. */
3403 for (uint32_t idxCr3Target = 0; idxCr3Target < uCr3TargetCount; idxCr3Target++)
3404 {
3405 uint64_t const uCr3TargetValue = iemVmxVmcsGetCr3TargetValue(pVmcs, idxCr3Target);
3406 if (uNewCr3 == uCr3TargetValue)
3407 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3408 }
3409
3410 Log2(("mov_Cr_Rd: (CR3) Guest intercept -> VM-exit\n"));
3411 VMXVEXITINFO ExitInfo;
3412 RT_ZERO(ExitInfo);
3413 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3414 ExitInfo.cbInstr = cbInstr;
3415
3416 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 3) /* CR3 */
3417 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
3418 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3419 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3420 }
3421
3422 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3423}
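
/*
 * CR3-target semantics in the check above, worked with hypothetical values: with a
 * CR3-target count of 2 and target values { 0x1000, 0x5000 }, loading CR3 with
 * 0x1000 or 0x5000 does not cause a VM-exit, while loading 0x3000 does. With a
 * CR3-target count of 0 no value can match, so every MOV to CR3 causes a VM-exit
 * whenever the CR3-load exiting control is set.
 */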
3424
3425
3426/**
3427 * VMX VM-exit handler for VM-exits due to 'Mov GReg,CR8' (CR8 read).
3428 *
3429 * @returns VBox strict status code.
3430 * @param pVCpu The cross context virtual CPU structure.
3431 * @param iGReg The general register to which the CR8 value is being stored.
3432 * @param cbInstr The instruction length in bytes.
3433 */
3434IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovFromCr8(PVMCPU pVCpu, uint8_t iGReg, uint8_t cbInstr)
3435{
3436 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3437 Assert(pVmcs);
3438
3439 /*
3440 * If the CR8-store exiting control is set, we must cause a VM-exit.
3441 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3442 */
3443 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR8_STORE_EXIT)
3444 {
3445 Log2(("mov_Rd_Cr: (CR8) Guest intercept -> VM-exit\n"));
3446
3447 VMXVEXITINFO ExitInfo;
3448 RT_ZERO(ExitInfo);
3449 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3450 ExitInfo.cbInstr = cbInstr;
3451
3452 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 8) /* CR8 */
3453 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_READ)
3454 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3455 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3456 }
3457
3458 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3459}
3460
3461
3462/**
3463 * VMX VM-exit handler for VM-exits due to 'Mov CR8,GReg' (CR8 write).
3464 *
3465 * @returns VBox strict status code.
3466 * @param pVCpu The cross context virtual CPU structure.
3467 * @param iGReg The general register from which the CR8 value is being
3468 * loaded.
3469 * @param cbInstr The instruction length in bytes.
3470 */
3471IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr8(PVMCPU pVCpu, uint8_t iGReg, uint8_t cbInstr)
3472{
3473 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3474 Assert(pVmcs);
3475
3476 /*
3477 * If the CR8-load exiting control is set, we must cause a VM-exit.
3478 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3479 */
3480 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR8_LOAD_EXIT)
3481 {
3482 Log2(("mov_Cr_Rd: (CR8) Guest intercept -> VM-exit\n"));
3483
3484 VMXVEXITINFO ExitInfo;
3485 RT_ZERO(ExitInfo);
3486 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3487 ExitInfo.cbInstr = cbInstr;
3488
3489 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 8) /* CR8 */
3490 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
3491 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3492 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3493 }
3494
3495 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3496}
3497
3498
3499/**
3500 * VMX VM-exit handler for VM-exits due to 'Mov DRx,GReg' (DRx write) and 'Mov
3501 * GReg,DRx' (DRx read).
3502 *
3503 * @returns VBox strict status code.
3504 * @param pVCpu The cross context virtual CPU structure.
3505 * @param uInstrId The instruction identity (VMXINSTRID_MOV_TO_DRX or
3506 * VMXINSTRID_MOV_FROM_DRX).
3507 * @param iDrReg The debug register being accessed.
3508 * @param iGReg The general register to/from which the DRx value is being
3509 * stored/loaded.
3510 * @param cbInstr The instruction length in bytes.
3511 */
3512IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovDrX(PVMCPU pVCpu, VMXINSTRID uInstrId, uint8_t iDrReg, uint8_t iGReg,
3513 uint8_t cbInstr)
3514{
3515 Assert(iDrReg <= 7);
3516 Assert(uInstrId == VMXINSTRID_MOV_TO_DRX || uInstrId == VMXINSTRID_MOV_FROM_DRX);
3517
3518 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3519 Assert(pVmcs);
3520
3521 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT)
3522 {
3523 uint32_t const uDirection = uInstrId == VMXINSTRID_MOV_TO_DRX ? VMX_EXIT_QUAL_DRX_DIRECTION_WRITE
3524 : VMX_EXIT_QUAL_DRX_DIRECTION_READ;
3525 VMXVEXITINFO ExitInfo;
3526 RT_ZERO(ExitInfo);
3527 ExitInfo.uReason = VMX_EXIT_MOV_DRX;
3528 ExitInfo.cbInstr = cbInstr;
3529 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_DRX_REGISTER, iDrReg)
3530 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_DRX_DIRECTION, uDirection)
3531 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_DRX_GENREG, iGReg);
3532 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3533 }
3534
3535 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3536}
3537
3538
3539/**
3540 * VMX VM-exit handler for VM-exits due to I/O instructions (IN and OUT).
3541 *
3542 * @returns VBox strict status code.
3543 * @param pVCpu The cross context virtual CPU structure.
3544 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_IO_IN or
3545 * VMXINSTRID_IO_OUT).
3546 * @param u16Port The I/O port being accessed.
3547 * @param fImm Whether the I/O port was encoded using an immediate operand
3548 * or the implicit DX register.
3549 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
3550 * @param cbInstr The instruction length in bytes.
3551 */
3552IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrIo(PVMCPU pVCpu, VMXINSTRID uInstrId, uint16_t u16Port, bool fImm, uint8_t cbAccess,
3553 uint8_t cbInstr)
3554{
3555 Assert(uInstrId == VMXINSTRID_IO_IN || uInstrId == VMXINSTRID_IO_OUT);
3556 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
3557
3558 bool const fIntercept = iemVmxIsIoInterceptSet(pVCpu, u16Port, cbAccess);
3559 if (fIntercept)
3560 {
3561 uint32_t const uDirection = uInstrId == VMXINSTRID_IO_IN ? VMX_EXIT_QUAL_IO_DIRECTION_IN
3562 : VMX_EXIT_QUAL_IO_DIRECTION_OUT;
3563 VMXVEXITINFO ExitInfo;
3564 RT_ZERO(ExitInfo);
3565 ExitInfo.uReason = VMX_EXIT_IO_INSTR;
3566 ExitInfo.cbInstr = cbInstr;
3567 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_WIDTH, cbAccess - 1)
3568 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_DIRECTION, uDirection)
3569 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_ENCODING, fImm)
3570 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_PORT, u16Port);
3571 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3572 }
3573
3574 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3575}
3576
3577
3578/**
3579 * VMX VM-exit handler for VM-exits due to string I/O instructions (INS and OUTS).
3580 *
3581 * @returns VBox strict status code.
3582 * @param pVCpu The cross context virtual CPU structure.
3583 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_IO_INS or
3584 * VMXINSTRID_IO_OUTS).
3585 * @param u16Port The I/O port being accessed.
3586 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
3587 * @param fRep Whether the instruction has a REP prefix or not.
3588 * @param ExitInstrInfo The VM-exit instruction info. field.
3589 * @param cbInstr The instruction length in bytes.
3590 */
3591IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrStrIo(PVMCPU pVCpu, VMXINSTRID uInstrId, uint16_t u16Port, uint8_t cbAccess, bool fRep,
3592 VMXEXITINSTRINFO ExitInstrInfo, uint8_t cbInstr)
3593{
3594 Assert(uInstrId == VMXINSTRID_IO_INS || uInstrId == VMXINSTRID_IO_OUTS);
3595 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
3596 Assert(ExitInstrInfo.StrIo.iSegReg < X86_SREG_COUNT);
3597 Assert(ExitInstrInfo.StrIo.u3AddrSize == 0 || ExitInstrInfo.StrIo.u3AddrSize == 1 || ExitInstrInfo.StrIo.u3AddrSize == 2);
3598 Assert(uInstrId != VMXINSTRID_IO_INS || ExitInstrInfo.StrIo.iSegReg == X86_SREG_ES);
3599
3600 bool const fIntercept = iemVmxIsIoInterceptSet(pVCpu, u16Port, cbAccess);
3601 if (fIntercept)
3602 {
3603 /*
3604 * Figure out the guest-linear address and the direction bit (INS/OUTS).
3605 */
3606 /** @todo r=ramshankar: Is there something in IEM that already does this? */
3607 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
3608 uint8_t const iSegReg = ExitInstrInfo.StrIo.iSegReg;
3609 uint8_t const uAddrSize = ExitInstrInfo.StrIo.u3AddrSize;
3610 uint64_t const uAddrSizeMask = s_auAddrSizeMasks[uAddrSize];
3611
3612 uint32_t uDirection;
3613 uint64_t uGuestLinearAddr;
3614 if (uInstrId == VMXINSTRID_IO_INS)
3615 {
3616 uDirection = VMX_EXIT_QUAL_IO_DIRECTION_IN;
3617 uGuestLinearAddr = pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base + (pVCpu->cpum.GstCtx.rdi & uAddrSizeMask);
3618 }
3619 else
3620 {
3621 uDirection = VMX_EXIT_QUAL_IO_DIRECTION_OUT;
3622 uGuestLinearAddr = pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base + (pVCpu->cpum.GstCtx.rsi & uAddrSizeMask);
3623 }
3624
3625 /*
3626 * If the segment is unusable, the guest-linear address is undefined.
3627 * We shall clear it for consistency.
3628 *
3629 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
3630 */
3631 if (pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Unusable)
3632 uGuestLinearAddr = 0;
3633
3634 VMXVEXITINFO ExitInfo;
3635 RT_ZERO(ExitInfo);
3636 ExitInfo.uReason = VMX_EXIT_IO_INSTR;
3637 ExitInfo.cbInstr = cbInstr;
3638 ExitInfo.InstrInfo = ExitInstrInfo;
3639 ExitInfo.u64GuestLinearAddr = uGuestLinearAddr;
3640 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_WIDTH, cbAccess - 1)
3641 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_DIRECTION, uDirection)
3642 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_IS_STRING, 1)
3643 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_IS_REP, fRep)
3644 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_ENCODING, VMX_EXIT_QUAL_IO_ENCODING_DX)
3645 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_PORT, u16Port);
3646 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3647 }
3648
3649 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3650}
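
/*
 * Example of the address computation above (hypothetical guest state): OUTSB with a
 * 16-bit address size (u3AddrSize = 0, mask 0xffff), DS.base = 0x10000 and
 * RSI = 0x1234 yields uGuestLinearAddr = 0x10000 + (0x1234 & 0xffff) = 0x11234 with
 * the direction reported as OUT; INSB would instead use ES:RDI and report IN.
 */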
3651
3652
3653/**
3654 * VMX VM-exit handler for VM-exits due to MWAIT.
3655 *
3656 * @returns VBox strict status code.
3657 * @param pVCpu The cross context virtual CPU structure.
3658 * @param fMonitorHwArmed Whether the address-range monitor hardware is armed.
3659 * @param cbInstr The instruction length in bytes.
3660 */
3661IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMwait(PVMCPU pVCpu, bool fMonitorHwArmed, uint8_t cbInstr)
3662{
3663 VMXVEXITINFO ExitInfo;
3664 RT_ZERO(ExitInfo);
3665 ExitInfo.uReason = VMX_EXIT_MWAIT;
3666 ExitInfo.cbInstr = cbInstr;
3667 ExitInfo.u64Qual = fMonitorHwArmed;
3668 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3669}
3670
3671
3672/**
3673 * VMX VM-exit handler for VM-exits due to PAUSE.
3674 *
3675 * @returns VBox strict status code.
3676 * @param pVCpu The cross context virtual CPU structure.
3677 * @param cbInstr The instruction length in bytes.
3678 */
3679IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrPause(PVMCPU pVCpu, uint8_t cbInstr)
3680{
3681 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3682 Assert(pVmcs);
3683
3684 /*
3685 * The PAUSE VM-exit is controlled by the "PAUSE exiting" control and the
3686 * "PAUSE-loop exiting" control.
3687 *
3688 * The PLE-Gap is the maximum number of TSC ticks allowed between two successive executions
3689 * of PAUSE for them to be considered part of the same PAUSE loop. The PLE-Window is the
3690 * maximum number of TSC ticks the guest is allowed to spend in such a PAUSE loop before we
3691 * must cause a VM-exit.
3692 *
3693 * See Intel spec. 24.6.13 "Controls for PAUSE-Loop Exiting".
3694 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3695 */
3696 bool fIntercept = false;
3697 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_PAUSE_EXIT)
3698 fIntercept = true;
3699 else if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)
3700 && pVCpu->iem.s.uCpl == 0)
3701 {
3702 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_HWVIRT);
3703
3704 /*
3705 * A previous-PAUSE-tick value of 0 is used to identify the first execution
3706 * of a PAUSE instruction after VM-entry at CPL 0. We must consider this to
3707 * be the first execution of PAUSE in a loop, per the Intel spec.
3708 *
3709 * For all subsequent recordings of the previous-PAUSE-tick we ensure that
3710 * it cannot be zero by OR'ing in 1, which rules out the TSC wrap-around
3711 * case at 0.
3712 */
3713 uint64_t *puFirstPauseLoopTick = &pVCpu->cpum.GstCtx.hwvirt.vmx.uFirstPauseLoopTick;
3714 uint64_t *puPrevPauseTick = &pVCpu->cpum.GstCtx.hwvirt.vmx.uPrevPauseTick;
3715 uint64_t const uTick = TMCpuTickGet(pVCpu);
3716 uint32_t const uPleGap = pVmcs->u32PleGap;
3717 uint32_t const uPleWindow = pVmcs->u32PleWindow;
3718 if ( *puPrevPauseTick == 0
3719 || uTick - *puPrevPauseTick > uPleGap)
3720 *puFirstPauseLoopTick = uTick;
3721 else if (uTick - *puFirstPauseLoopTick > uPleWindow)
3722 fIntercept = true;
3723
3724 *puPrevPauseTick = uTick | 1;
3725 }
3726
3727 if (fIntercept)
3728 {
3729 VMXVEXITINFO ExitInfo;
3730 RT_ZERO(ExitInfo);
3731 ExitInfo.uReason = VMX_EXIT_PAUSE;
3732 ExitInfo.cbInstr = cbInstr;
3733 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3734 }
3735
3736 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3737}
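
/*
 * Worked example of the PAUSE-loop detection above (hypothetical values):
 * PLE-Gap = 128 TSC ticks, PLE-Window = 4096 TSC ticks, PAUSE executed every
 * 100 ticks at CPL 0. The first PAUSE at tick 1000 finds uPrevPauseTick == 0 and
 * starts a loop (uFirstPauseLoopTick = 1000). Each later PAUSE is 100 ticks (< 128)
 * after the previous one, so the loop continues until tick 5200, where
 * 5200 - 1000 > 4096 and fIntercept = true, causing a PAUSE VM-exit. A single gap
 * above 128 ticks would instead restart the loop at that tick.
 */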
3738
3739
3740/**
3741 * VMX VM-exit handler for VM-exits due to task switches.
3742 *
3743 * @returns VBox strict status code.
3744 * @param pVCpu The cross context virtual CPU structure.
3745 * @param enmTaskSwitch The cause of the task switch.
3746 * @param SelNewTss The selector of the new TSS.
3747 * @param cbInstr The instruction length in bytes.
3748 */
3749IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPU pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr)
3750{
3751 /*
3752 * Task-switch VM-exits are unconditional and provide the VM-exit qualification.
3753 *
3754 * If the cause of the task switch is the execution of the CALL, IRET or JMP
3755 * instruction, or the delivery of an exception generated by one of these
3756 * instructions that leads to a task switch through a task gate in the IDT, we
3757 * need to provide the VM-exit instruction length. Any other means of invoking a
3758 * task switch VM-exit leaves the VM-exit instruction length field undefined.
3759 *
3760 * See Intel spec. 25.2 "Other Causes Of VM Exits".
3761 * See Intel spec. 27.2.4 "Information for VM Exits Due to Instruction Execution".
3762 */
3763 Assert(cbInstr <= 15);
3764
3765 uint8_t uType;
3766 switch (enmTaskSwitch)
3767 {
3768 case IEMTASKSWITCH_CALL: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_CALL; break;
3769 case IEMTASKSWITCH_IRET: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IRET; break;
3770 case IEMTASKSWITCH_JUMP: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_JMP; break;
3771 case IEMTASKSWITCH_INT_XCPT: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT; break;
3772 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3773 }
3774
3775 uint64_t const uExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_TASK_SWITCH_NEW_TSS, SelNewTss)
3776 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_TASK_SWITCH_SOURCE, uType);
3777 iemVmxVmcsSetExitQual(pVCpu, uExitQual);
3778 iemVmxVmcsSetExitInstrLen(pVCpu, cbInstr);
3779 return iemVmxVmexit(pVCpu, VMX_EXIT_TASK_SWITCH);
3780}
3781
3782
3783/**
3784 * VMX VM-exit handler for VM-exits due to expiry of the preemption timer.
3785 *
3786 * @returns VBox strict status code.
3787 * @param pVCpu The cross context virtual CPU structure.
3788 */
3789IEM_STATIC VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPU pVCpu)
3790{
3791 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3792 Assert(pVmcs);
3793
3794 /* Check if the guest has enabled VMX-preemption timers in the first place. */
3795 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
3796 {
3797 /*
3798 * Calculate the current VMX-preemption timer value.
3799 * We cause the VM-exit only if the value has reached zero.
3800 */
3801 uint32_t uPreemptTimer = iemVmxCalcPreemptTimer(pVCpu);
3802 if (!uPreemptTimer)
3803 {
3804 /* Save the VMX-preemption timer value (of 0) back into the VMCS if the CPU supports this feature. */
3805 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER)
3806 pVmcs->u32PreemptTimer = 0;
3807
3808 /* Clear the force-flag, indicating that the VMX-preemption timer is no longer active. */
3809 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER);
3810
3811 /* Cause the VMX-preemption timer VM-exit. The VM-exit qualification is MBZ. */
3812 iemVmxVmcsSetExitQual(pVCpu, 0);
3813 return iemVmxVmexit(pVCpu, VMX_EXIT_PREEMPT_TIMER);
3814 }
3815 }
3816
3817 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3818}
3819
3820
3821/**
3822 * VMX VM-exit handler for VM-exits due to external interrupts.
3823 *
3824 * @returns VBox strict status code.
3825 * @param pVCpu The cross context virtual CPU structure.
3826 * @param uVector The external interrupt vector.
3827 * @param fIntPending Whether the external interrupt is pending or
3828 * acknowledged in the interrupt controller.
3829 */
3830IEM_STATIC VBOXSTRICTRC iemVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending)
3831{
3832 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3833 Assert(pVmcs);
3834
3835 /* The VM-exit is subject to the "External interrupt exiting" control being set. */
3836 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT)
3837 {
3838 if (fIntPending)
3839 {
3840 /*
3841 * If the interrupt is pending and we don't need to acknowledge the
3842 * interrupt on VM-exit, cause the VM-exit immediately.
3843 *
3844 * See Intel spec 25.2 "Other Causes Of VM Exits".
3845 */
3846 if (!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT))
3847 {
3848 iemVmxVmcsSetExitIntInfo(pVCpu, 0);
3849 iemVmxVmcsSetExitIntErrCode(pVCpu, 0);
3850 iemVmxVmcsSetExitQual(pVCpu, 0);
3851 return iemVmxVmexit(pVCpu, VMX_EXIT_EXT_INT);
3852 }
3853
3854 /*
3855 * If the interrupt is pending and we -do- need to acknowledge the interrupt
3856 * on VM-exit, postpone the VM-exit until after the interrupt has been
3857 * acknowledged with the interrupt controller (i.e. consumed).
3858 */
3859 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3860 }
3861
3862 /*
3863 * If the interrupt is no longer pending (i.e. it has been acknowledged) and the
3864 * "External interrupt exiting" and "Acknowledge interrupt on VM-exit" controls are
3865 * both set, we cause the VM-exit now. We need to record the external interrupt that
3866 * just occurred in the VM-exit interruption information field.
3867 *
3868 * See Intel spec. 27.2.2 "Information for VM Exits Due to Vectored Events".
3869 */
3870 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
3871 {
3872 uint8_t const fNmiUnblocking = pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret;
3873 uint32_t const uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, uVector)
3874 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_EXT_INT)
3875 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_NMI_UNBLOCK_IRET, fNmiUnblocking)
3876 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1);
3877 iemVmxVmcsSetExitIntInfo(pVCpu, uExitIntInfo);
3878 iemVmxVmcsSetExitIntErrCode(pVCpu, 0);
3879 iemVmxVmcsSetExitQual(pVCpu, 0);
3880 return iemVmxVmexit(pVCpu, VMX_EXIT_EXT_INT);
3881 }
3882 }
3883
3884 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3885}
3886
3887
3888/**
3889 * VMX VM-exit handler for VM-exits due to startup-IPIs (SIPI).
3890 *
3891 * @returns VBox strict status code.
3892 * @param pVCpu The cross context virtual CPU structure.
3893 * @param uVector The SIPI vector.
3894 */
3895IEM_STATIC VBOXSTRICTRC iemVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector)
3896{
3897 iemVmxVmcsSetExitQual(pVCpu, uVector);
3898 return iemVmxVmexit(pVCpu, VMX_EXIT_SIPI);
3899}
3900
3901
3902/**
3903 * VMX VM-exit handler for VM-exits due to init-IPIs (INIT).
3904 *
3905 * @returns VBox strict status code.
3906 * @param pVCpu The cross context virtual CPU structure.
3907 */
3908IEM_STATIC VBOXSTRICTRC iemVmxVmexitInitIpi(PVMCPU pVCpu)
3909{
3910 iemVmxVmcsSetExitQual(pVCpu, 0);
3911 return iemVmxVmexit(pVCpu, VMX_EXIT_INIT_SIGNAL);
3912}
3913
3914
3915/**
3916 * VMX VM-exit handler for interrupt-window VM-exits.
3917 *
3918 * @returns VBox strict status code.
3919 * @param pVCpu The cross context virtual CPU structure.
3920 */
3921IEM_STATIC VBOXSTRICTRC iemVmxVmexitIntWindow(PVMCPU pVCpu)
3922{
3923 iemVmxVmcsSetExitQual(pVCpu, 0);
3924 return iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW);
3925}
3926
3927
3928/**
3929 * VMX VM-exit handler for VM-exits due to delivery of an event.
3930 *
3931 * @returns VBox strict status code.
3932 * @param pVCpu The cross context virtual CPU structure.
3933 * @param uVector The interrupt / exception vector.
3934 * @param fFlags The flags (see IEM_XCPT_FLAGS_XXX).
3935 * @param uErrCode The error code associated with the event.
3936 * @param uCr2 The CR2 value in case of a \#PF exception.
3937 * @param cbInstr The instruction length in bytes.
3938 */
3939IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPU pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2,
3940 uint8_t cbInstr)
3941{
3942 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3943 Assert(pVmcs);
3944
3945 /*
3946 * If the event is being injected as part of VM-entry, it isn't subject to event
3947 * intercepts in the nested-guest. However, secondary exceptions that occur during
3948 * injection of any event -are- subject to event interception.
3949 *
3950 * See Intel spec. 26.5.1.2 "VM Exits During Event Injection".
3951 */
3952 if (!pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents)
3953 {
3954 /* Update the IDT-vectoring event in the VMCS as the source of the upcoming event. */
3955 uint8_t const uIdtVectoringType = iemVmxGetEventType(uVector, fFlags);
3956 uint8_t const fErrCodeValid = (fFlags & IEM_XCPT_FLAGS_ERR);
3957 uint32_t const uIdtVectoringInfo = RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_VECTOR, uVector)
3958 | RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_TYPE, uIdtVectoringType)
3959 | RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_ERR_CODE_VALID, fErrCodeValid)
3960 | RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_VALID, 1);
3961 iemVmxVmcsSetIdtVectoringInfo(pVCpu, uIdtVectoringInfo);
3962 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, uErrCode);
3963
3964 pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents = true;
3965 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3966 }
3967
3968 /*
3969 * We are injecting an external interrupt; check if we need to cause a VM-exit now.
3970 * If not, the caller will continue delivery of the external interrupt as it would
3971 * normally.
3972 */
3973 if (fFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3974 {
3975 Assert(!VMX_IDT_VECTORING_INFO_IS_VALID(pVmcs->u32RoIdtVectoringInfo));
3976 return iemVmxVmexitExtInt(pVCpu, uVector, false /* fIntPending */);
3977 }
3978
3979 /*
3980 * Evaluate intercepts for hardware exceptions including #BP, #DB, #OF
3981 * generated by INT3, INT1 (ICEBP) and INTO respectively.
3982 */
3983 Assert(fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_SOFT_INT));
3984 bool fIntercept = false;
3985 bool fIsHwXcpt = false;
3986 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3987 || (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR | IEM_XCPT_FLAGS_ICEBP_INSTR)))
3988 {
3989 fIsHwXcpt = true;
3990 /* NMIs have a dedicated VM-execution control for causing VM-exits. */
3991 if (uVector == X86_XCPT_NMI)
3992 fIntercept = RT_BOOL(pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT);
3993 else
3994 {
3995 /* Page-faults are subject to masking using their error code. */
3996 uint32_t fXcptBitmap = pVmcs->u32XcptBitmap;
3997 if (uVector == X86_XCPT_PF)
3998 {
3999 uint32_t const fXcptPFMask = pVmcs->u32XcptPFMask;
4000 uint32_t const fXcptPFMatch = pVmcs->u32XcptPFMatch;
4001 if ((uErrCode & fXcptPFMask) != fXcptPFMatch)
4002 fXcptBitmap ^= RT_BIT(X86_XCPT_PF);
4003 }
4004
4005 /* Consult the exception bitmap for all hardware exceptions (except NMI). */
4006 if (fXcptBitmap & RT_BIT(uVector))
4007 fIntercept = true;
4008 }
4009 }
4010 /* else: Software interrupts cannot be intercepted and therefore do not cause a VM-exit. */
4011
4012 /*
4013 * Now that we've determined whether the software interrupt or hardware exception
4014 * causes a VM-exit, we need to construct the relevant VM-exit information and
4015 * cause the VM-exit.
4016 */
4017 if (fIntercept)
4018 {
4019 Assert(!(fFlags & IEM_XCPT_FLAGS_T_EXT_INT));
4020
4021 /* Construct the rest of the event related information fields and cause the VM-exit. */
4022 uint64_t uExitQual = 0;
4023 if (fIsHwXcpt)
4024 {
4025 if (uVector == X86_XCPT_PF)
4026 uExitQual = uCr2;
4027 else if (uVector == X86_XCPT_DB)
4028 {
4029 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR6);
4030 uExitQual = pVCpu->cpum.GstCtx.dr[6] & VMX_VMCS_EXIT_QUAL_VALID_MASK;
4031 }
4032 }
4033
4034 uint8_t const fNmiUnblocking = pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret;
4035 uint8_t const fErrCodeValid = (fFlags & IEM_XCPT_FLAGS_ERR);
4036 uint8_t const uIntInfoType = iemVmxGetEventType(uVector, fFlags);
4037 uint32_t const uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, uVector)
4038 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, uIntInfoType)
4039 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_ERR_CODE_VALID, fErrCodeValid)
4040 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_NMI_UNBLOCK_IRET, fNmiUnblocking)
4041 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1);
4042 iemVmxVmcsSetExitIntInfo(pVCpu, uExitIntInfo);
4043 iemVmxVmcsSetExitIntErrCode(pVCpu, uErrCode);
4044 iemVmxVmcsSetExitQual(pVCpu, uExitQual);
4045
4046 /*
4047 * For VM exits due to software exceptions (those generated by INT3 or INTO) or privileged
4048 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
4049 * length.
4050 */
4051 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4052 && (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR | IEM_XCPT_FLAGS_ICEBP_INSTR)))
4053 iemVmxVmcsSetExitInstrLen(pVCpu, cbInstr);
4054 else
4055 iemVmxVmcsSetExitInstrLen(pVCpu, 0);
4056
4057 return iemVmxVmexit(pVCpu, VMX_EXIT_XCPT_OR_NMI);
4058 }
4059
4060 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4061}
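
/*
 * Worked example of the page-fault filtering above (hypothetical VMCS values):
 * exception bitmap bit 14 (#PF) = 1, PFEC_MASK = 0x1 (the P bit) and
 * PFEC_MATCH = 0x1. A #PF with error code 0x3 (present, write) satisfies
 * (0x3 & 0x1) == 0x1, the bitmap bit is left alone and the #PF causes a VM-exit.
 * A #PF with error code 0x2 (not-present, write) fails the match, the #PF bit is
 * toggled off and the exception is delivered to the nested-guest instead.
 */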
4062
4063
4064/**
4065 * VMX VM-exit handler for VM-exits due to a triple fault.
4066 *
4067 * @returns VBox strict status code.
4068 * @param pVCpu The cross context virtual CPU structure.
4069 */
4070IEM_STATIC VBOXSTRICTRC iemVmxVmexitTripleFault(PVMCPU pVCpu)
4071{
4072 iemVmxVmcsSetExitQual(pVCpu, 0);
4073 return iemVmxVmexit(pVCpu, VMX_EXIT_TRIPLE_FAULT);
4074}
4075
4076
4077/**
4078 * VMX VM-exit handler for APIC-accesses.
4079 *
4080 * @param pVCpu The cross context virtual CPU structure.
4081 * @param offAccess The offset of the register being accessed.
4082 * @param fAccess The type of access (must contain IEM_ACCESS_TYPE_READ or
4083 * IEM_ACCESS_TYPE_WRITE or IEM_ACCESS_INSTRUCTION).
4084 */
4085IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicAccess(PVMCPU pVCpu, uint16_t offAccess, uint32_t fAccess)
4086{
4087 Assert((fAccess & IEM_ACCESS_TYPE_READ) || (fAccess & IEM_ACCESS_TYPE_WRITE) || (fAccess & IEM_ACCESS_INSTRUCTION));
4088
4089 VMXAPICACCESS enmAccess;
4090 bool const fInEventDelivery = IEMGetCurrentXcpt(pVCpu, NULL, NULL, NULL, NULL);
4091 if (fInEventDelivery)
4092 enmAccess = VMXAPICACCESS_LINEAR_EVENT_DELIVERY;
4093 else if (fAccess & IEM_ACCESS_INSTRUCTION)
4094 enmAccess = VMXAPICACCESS_LINEAR_INSTR_FETCH;
4095 else if (fAccess & IEM_ACCESS_TYPE_WRITE)
4096 enmAccess = VMXAPICACCESS_LINEAR_WRITE;
4097 else
4098 enmAccess = VMXAPICACCESS_LINEAR_READ;
4099
4100 uint64_t const uExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_OFFSET, offAccess)
4101 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_TYPE, enmAccess);
4102 iemVmxVmcsSetExitQual(pVCpu, uExitQual);
4103 return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_ACCESS);
4104}
4105
4106
4107/**
4108 * VMX VM-exit handler for APIC-write VM-exits.
4109 *
4110 * @param pVCpu The cross context virtual CPU structure.
4111 * @param offApic The write to the virtual-APIC page offset that caused this
4112 * VM-exit.
4113 */
4114IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicWrite(PVMCPU pVCpu, uint16_t offApic)
4115{
4116 Assert(offApic < XAPIC_OFF_END + 4);
4117
4118 /* Write only bits 11:0 of the APIC offset into the VM-exit qualification field. */
4119 offApic &= UINT16_C(0xfff);
4120 iemVmxVmcsSetExitQual(pVCpu, offApic);
4121 return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_WRITE);
4122}
4123
4124
4125/**
4126 * VMX VM-exit handler for virtualized-EOIs.
4127 *
4128 * @param pVCpu The cross context virtual CPU structure.
4129 */
4130IEM_STATIC VBOXSTRICTRC iemVmxVmexitVirtEoi(PVMCPU pVCpu, uint8_t uVector)
4131{
4132 iemVmxVmcsSetExitQual(pVCpu, uVector);
4133 return iemVmxVmexit(pVCpu, VMX_EXIT_VIRTUALIZED_EOI);
4134}
4135
4136
4137/**
4138 * Sets virtual-APIC write emulation as pending.
4139 *
4140 * @param pVCpu The cross context virtual CPU structure.
4141 * @param offApic The offset in the virtual-APIC page that was written.
4142 */
4143DECLINLINE(void) iemVmxVirtApicSetPendingWrite(PVMCPU pVCpu, uint16_t offApic)
4144{
4145 Assert(offApic < XAPIC_OFF_END + 4);
4146
4147 /*
4148 * Record the currently updated APIC offset, as we need this later for figuring
4149 * out whether to perform TPR, EOI or self-IPI virtualization, as well as for
4150 * supplying the exit qualification when causing an APIC-write VM-exit.
4151 */
4152 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offApic;
4153
4154 /*
4155 * Signal that we need to perform virtual-APIC write emulation (TPR/PPR/EOI/Self-IPI
4156 * virtualization or APIC-write emulation).
4157 */
4158 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4159 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE);
4160}
4161
4162
4163/**
4164 * Clears any pending virtual-APIC write emulation.
4165 *
4166 * @returns The virtual-APIC offset that was written before clearing it.
4167 * @param pVCpu The cross context virtual CPU structure.
4168 */
4169DECLINLINE(uint16_t) iemVmxVirtApicClearPendingWrite(PVMCPU pVCpu)
4170{
4171 uint16_t const offVirtApicWrite = pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite;
4172 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = 0;
4173 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
4174 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_VMX_APIC_WRITE);
4175 return offVirtApicWrite;
4176}
4177
4178
4179/**
4180 * Reads a 32-bit register from the virtual-APIC page at the given offset.
4181 *
4182 * @returns The register from the virtual-APIC page.
4183 * @param pVCpu The cross context virtual CPU structure.
4184 * @param offReg The offset of the register being read.
4185 */
4186DECLINLINE(uint32_t) iemVmxVirtApicReadRaw32(PVMCPU pVCpu, uint16_t offReg)
4187{
4188 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
4189 uint8_t const *pbVirtApic = (const uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
4190 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
4191 uint32_t const uReg = *(const uint32_t *)(pbVirtApic + offReg);
4192 return uReg;
4193}
4194
4195
4196/**
4197 * Reads a 64-bit register from the virtual-APIC page at the given offset.
4198 *
4199 * @returns The register from the virtual-APIC page.
4200 * @param pVCpu The cross context virtual CPU structure.
4201 * @param offReg The offset of the register being read.
4202 */
4203DECLINLINE(uint64_t) iemVmxVirtApicReadRaw64(PVMCPU pVCpu, uint16_t offReg)
4204{
4205 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint64_t));
4206 uint8_t const *pbVirtApic = (const uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
4207 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
4208 uint64_t const uReg = *(const uint64_t *)(pbVirtApic + offReg);
4209 return uReg;
4210}
4211
4212
4213/**
4214 * Writes a 32-bit register to the virtual-APIC page at the given offset.
4215 *
4216 * @param pVCpu The cross context virtual CPU structure.
4217 * @param offReg The offset of the register being written.
4218 * @param uReg The register value to write.
4219 */
4220DECLINLINE(void) iemVmxVirtApicWriteRaw32(PVMCPU pVCpu, uint16_t offReg, uint32_t uReg)
4221{
4222 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
4223 uint8_t *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
4224 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
4225 *(uint32_t *)(pbVirtApic + offReg) = uReg;
4226}
4227
4228
4229/**
4230 * Writes a 64-bit register to the virtual-APIC page at the given offset.
4231 *
4232 * @param pVCpu The cross context virtual CPU structure.
4233 * @param offReg The offset of the register being written.
4234 * @param uReg The register value to write.
4235 */
4236DECLINLINE(void) iemVmxVirtApicWriteRaw64(PVMCPU pVCpu, uint16_t offReg, uint64_t uReg)
4237{
4238 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint64_t));
4239 uint8_t *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
4240 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
4241 *(uint64_t *)(pbVirtApic + offReg) = uReg;
4242}
4243
4244
4245/**
4246 * Sets the vector in a virtual-APIC 256-bit sparse register.
4247 *
4248 * @param pVCpu The cross context virtual CPU structure.
4249 * @param offReg The offset of the 256-bit sparse register.
4250 * @param uVector The vector to set.
4251 *
4252 * @remarks This is based on our APIC device code.
4253 */
4254DECLINLINE(void) iemVmxVirtApicSetVector(PVMCPU pVCpu, uint16_t offReg, uint8_t uVector)
4255{
4256 Assert(offReg == XAPIC_OFF_ISR0 || offReg == XAPIC_OFF_TMR0 || offReg == XAPIC_OFF_IRR0);
4257 uint8_t *pbBitmap = ((uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage)) + offReg;
4258 uint16_t const offVector = (uVector & UINT32_C(0xe0)) >> 1;
4259 uint16_t const idxVectorBit = uVector & UINT32_C(0x1f);
4260 ASMAtomicBitSet(pbBitmap + offVector, idxVectorBit);
4261}
4262
4263
4264/**
4265 * Clears the vector in a virtual-APIC 256-bit sparse register.
4266 *
4267 * @param pVCpu The cross context virtual CPU structure.
4268 * @param offReg The offset of the 256-bit sparse register.
4269 * @param uVector The vector to clear.
4270 *
4271 * @remarks This is based on our APIC device code.
4272 */
4273DECLINLINE(void) iemVmxVirtApicClearVector(PVMCPU pVCpu, uint16_t offReg, uint8_t uVector)
4274{
4275 Assert(offReg == XAPIC_OFF_ISR0 || offReg == XAPIC_OFF_TMR0 || offReg == XAPIC_OFF_IRR0);
4276 uint8_t *pbBitmap = ((uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage)) + offReg;
4277 uint16_t const offVector = (uVector & UINT32_C(0xe0)) >> 1;
4278 uint16_t const idxVectorBit = uVector & UINT32_C(0x1f);
4279 ASMAtomicBitClear(pbBitmap + offVector, idxVectorBit);
4280}
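
/*
 * Index math used by the two helpers above, worked for vector 0x41:
 * offVector = (0x41 & 0xe0) >> 1 = 0x20 and idxVectorBit = 0x41 & 0x1f = 1.
 * The 256-bit register is laid out as eight 32-bit fragments spaced 16 bytes
 * apart, so vector 0x41 lands in the third fragment (offReg + 0x20) at bit 1.
 */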
4281
4282
4283/**
4284 * Checks if a memory access to the APIC-access page must cause an APIC-access
4285 * VM-exit.
4286 *
4287 * @param pVCpu The cross context virtual CPU structure.
4288 * @param offAccess The offset of the register being accessed.
4289 * @param cbAccess The size of the access in bytes.
4290 * @param fAccess The type of access (must be IEM_ACCESS_TYPE_READ or
4291 * IEM_ACCESS_TYPE_WRITE).
4292 *
4293 * @remarks This must not be used for MSR-based APIC-access page accesses!
4294 * @sa iemVmxVirtApicAccessMsrWrite, iemVmxVirtApicAccessMsrRead.
4295 */
4296IEM_STATIC bool iemVmxVirtApicIsMemAccessIntercepted(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, uint32_t fAccess)
4297{
4298 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4299 Assert(pVmcs);
4300 Assert(fAccess == IEM_ACCESS_TYPE_READ || fAccess == IEM_ACCESS_TYPE_WRITE);
4301
4302 /*
4303 * We must cause a VM-exit if any of the following are true:
4304 * - TPR shadowing isn't active.
4305 * - The access size exceeds 32 bits.
4306 * - The access is not contained within the low 4 bytes of a 16-byte aligned offset, or lies beyond the APIC-register area (offset >= XAPIC_OFF_END + 4).
4307 *
4308 * See Intel spec. 29.4.2 "Virtualizing Reads from the APIC-Access Page".
4309 * See Intel spec. 29.4.3.1 "Determining Whether a Write Access is Virtualized".
4310 */
4311 if ( !(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
4312 || cbAccess > sizeof(uint32_t)
4313 || ((offAccess + cbAccess - 1) & 0xc)
4314 || offAccess >= XAPIC_OFF_END + 4)
4315 return true;
4316
4317 /*
4318 * If the access is part of an operation where we have already
4319 * virtualized a virtual-APIC write, we must cause a VM-exit.
4320 */
4321 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4322 return true;
4323
4324 /*
4325 * Check write accesses to the APIC-access page that cause VM-exits.
4326 */
4327 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4328 {
4329 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4330 {
4331 /*
4332 * With APIC-register virtualization, a write access to any of the
4333 * following registers is virtualized. Accessing any other register
4334 * causes a VM-exit.
4335 */
4336 uint16_t const offAlignedAccess = offAccess & 0xfffc;
4337 switch (offAlignedAccess)
4338 {
4339 case XAPIC_OFF_ID:
4340 case XAPIC_OFF_TPR:
4341 case XAPIC_OFF_EOI:
4342 case XAPIC_OFF_LDR:
4343 case XAPIC_OFF_DFR:
4344 case XAPIC_OFF_SVR:
4345 case XAPIC_OFF_ESR:
4346 case XAPIC_OFF_ICR_LO:
4347 case XAPIC_OFF_ICR_HI:
4348 case XAPIC_OFF_LVT_TIMER:
4349 case XAPIC_OFF_LVT_THERMAL:
4350 case XAPIC_OFF_LVT_PERF:
4351 case XAPIC_OFF_LVT_LINT0:
4352 case XAPIC_OFF_LVT_LINT1:
4353 case XAPIC_OFF_LVT_ERROR:
4354 case XAPIC_OFF_TIMER_ICR:
4355 case XAPIC_OFF_TIMER_DCR:
4356 break;
4357 default:
4358 return true;
4359 }
4360 }
4361 else if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4362 {
4363 /*
4364 * With virtual-interrupt delivery, a write access to any of the
4365 * following registers is virtualized. Accessing any other register
4366 * causes a VM-exit.
4367 *
4368 * Note! Unlike read accesses, the specification does not allow writes to
4369 * offsets in-between these registers (e.g. TPR + 1 byte).
4370 */
4371 switch (offAccess)
4372 {
4373 case XAPIC_OFF_TPR:
4374 case XAPIC_OFF_EOI:
4375 case XAPIC_OFF_ICR_LO:
4376 break;
4377 default:
4378 return true;
4379 }
4380 }
4381 else
4382 {
4383 /*
4384 * Without APIC-register virtualization or virtual-interrupt delivery,
4385 * only TPR accesses are virtualized.
4386 */
4387 if (offAccess == XAPIC_OFF_TPR)
4388 { /* likely */ }
4389 else
4390 return true;
4391 }
4392 }
4393 else
4394 {
4395 /*
4396 * Check read accesses to the APIC-access page that cause VM-exits.
4397 */
4398 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4399 {
4400 /*
4401 * With APIC-register virtualization, a read access to any of the
4402 * following registers is virtualized. Accessing any other register
4403 * causes a VM-exit.
4404 */
4405 uint16_t const offAlignedAccess = offAccess & 0xfffc;
4406 switch (offAlignedAccess)
4407 {
4408 /** @todo r=ramshankar: What about XAPIC_OFF_LVT_CMCI? */
4409 case XAPIC_OFF_ID:
4410 case XAPIC_OFF_VERSION:
4411 case XAPIC_OFF_TPR:
4412 case XAPIC_OFF_EOI:
4413 case XAPIC_OFF_LDR:
4414 case XAPIC_OFF_DFR:
4415 case XAPIC_OFF_SVR:
4416 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
4417 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
4418 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
4419 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
4420 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
4421 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
4422 case XAPIC_OFF_ESR:
4423 case XAPIC_OFF_ICR_LO:
4424 case XAPIC_OFF_ICR_HI:
4425 case XAPIC_OFF_LVT_TIMER:
4426 case XAPIC_OFF_LVT_THERMAL:
4427 case XAPIC_OFF_LVT_PERF:
4428 case XAPIC_OFF_LVT_LINT0:
4429 case XAPIC_OFF_LVT_LINT1:
4430 case XAPIC_OFF_LVT_ERROR:
4431 case XAPIC_OFF_TIMER_ICR:
4432 case XAPIC_OFF_TIMER_DCR:
4433 break;
4434 default:
4435 return true;
4436 }
4437 }
4438 else
4439 {
4440 /* Without APIC-register virtualization, only TPR accesses are virtualized. */
4441 if (offAccess == XAPIC_OFF_TPR)
4442 { /* likely */ }
4443 else
4444 return true;
4445 }
4446 }
4447
4448 /* The APIC-access is virtualized and does not cause a VM-exit. */
4449 return false;
4450}
4451
4452
4453/**
4454 * Virtualizes a memory-based APIC-access where the address is not used to access
4455 * memory.
4456 *
4457 * This is for instructions like MONITOR, CLFLUSH, CLFLUSHOPT, ENTER which may cause
4458 * page-faults but do not use the address to access memory.
4459 *
4460 * @param pVCpu The cross context virtual CPU structure.
4461 * @param pGCPhysAccess Pointer to the guest-physical address used.
4462 */
4463IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessUnused(PVMCPU pVCpu, PRTGCPHYS pGCPhysAccess)
4464{
4465 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4466 Assert(pVmcs);
4467 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
4468 Assert(pGCPhysAccess);
4469
4470 RTGCPHYS const GCPhysAccess = *pGCPhysAccess & ~(RTGCPHYS)PAGE_OFFSET_MASK;
4471 RTGCPHYS const GCPhysApic = pVmcs->u64AddrApicAccess.u;
4472 Assert(!(GCPhysApic & PAGE_OFFSET_MASK));
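    /*
     * For illustration, assuming 4K pages (PAGE_OFFSET_MASK == 0xfff): an access to
     * guest-physical address 0xfee00080 with the APIC-access page at 0xfee00000 masks
     * down to the same page base, and the register offset extracted below is 0x80 (TPR).
     */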
4473
4474 if (GCPhysAccess == GCPhysApic)
4475 {
4476 uint16_t const offAccess = *pGCPhysAccess & PAGE_OFFSET_MASK;
4477 uint32_t const fAccess = IEM_ACCESS_TYPE_READ;
4478 uint16_t const cbAccess = 1;
4479 bool const fIntercept = iemVmxVirtApicIsMemAccessIntercepted(pVCpu, offAccess, cbAccess, fAccess);
4480 if (fIntercept)
4481 return iemVmxVmexitApicAccess(pVCpu, offAccess, fAccess);
4482
4483 *pGCPhysAccess = GCPhysApic | offAccess;
4484 return VINF_VMX_MODIFIES_BEHAVIOR;
4485 }
4486
4487 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4488}
4489
4490
4491/**
4492 * Virtualizes a memory-based APIC-access.
4493 *
4494 * @returns VBox strict status code.
4495 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the access was virtualized.
4496 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
4497 *
4498 * @param pVCpu The cross context virtual CPU structure.
4499 * @param offAccess The offset of the register being accessed (within the
4500 * APIC-access page).
4501 * @param cbAccess The size of the access in bytes.
4502 * @param pvData Pointer to the data being written or where to store the data
4503 * being read.
4504 * @param fAccess The type of access (must contain IEM_ACCESS_TYPE_READ or
4505 * IEM_ACCESS_TYPE_WRITE or IEM_ACCESS_INSTRUCTION).
4506 */
4507IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData,
4508 uint32_t fAccess)
4509{
4510 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4511 Assert(pVmcs);
4512 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS); NOREF(pVmcs);
4513 Assert(pvData);
4514 Assert( (fAccess & IEM_ACCESS_TYPE_READ)
4515 || (fAccess & IEM_ACCESS_TYPE_WRITE)
4516 || (fAccess & IEM_ACCESS_INSTRUCTION));
4517
4518 bool const fIntercept = iemVmxVirtApicIsMemAccessIntercepted(pVCpu, offAccess, cbAccess, fAccess);
4519 if (fIntercept)
4520 return iemVmxVmexitApicAccess(pVCpu, offAccess, fAccess);
4521
4522 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4523 {
4524 /*
4525 * A write access to the APIC-access page that is virtualized (rather than
4526 * causing a VM-exit) writes data to the virtual-APIC page.
4527 */
4528 uint32_t const u32Data = *(uint32_t *)pvData;
4529 iemVmxVirtApicWriteRaw32(pVCpu, offAccess, u32Data);
4530
4531 /*
4532 * Record the currently updated APIC offset, as we need this later for figuring
4533 * out whether to perform TPR, EOI or self-IPI virtualization, as well as for
4534 * supplying the exit qualification when causing an APIC-write VM-exit.
4535 *
4536 * After completion of the current operation, we need to perform TPR virtualization,
4537 * EOI virtualization or APIC-write VM-exit depending on which register was written.
4538 *
4539 * The current operation may be a REP-prefixed string instruction, execution of any
4540 * other instruction, or delivery of an event through the IDT.
4541 *
4542 * Thus things like clearing bytes 3:1 of the VTPR, clearing VEOI are not to be
4543 * performed now but later after completion of the current operation.
4544 *
4545 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4546 */
4547 iemVmxVirtApicSetPendingWrite(pVCpu, offAccess);
4548 }
4549 else
4550 {
4551 /*
4552 * A read access from the APIC-access page that is virtualized (rather than
4553 * causing a VM-exit) returns data from the virtual-APIC page.
4554 *
4555 * See Intel spec. 29.4.2 "Virtualizing Reads from the APIC-Access Page".
4556 */
4557 Assert(cbAccess <= 4);
4558 Assert(offAccess < XAPIC_OFF_END + 4);
4559 static uint32_t const s_auAccessSizeMasks[] = { 0, 0xff, 0xffff, 0xffffff, 0xffffffff };
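        /* Illustration: for a 2-byte read (cbAccess == 2) the mask is 0xffff, so only the
           low two bytes of the 32-bit virtual-APIC register are returned to the caller. */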
4560
4561 uint32_t u32Data = iemVmxVirtApicReadRaw32(pVCpu, offAccess);
4562 u32Data &= s_auAccessSizeMasks[cbAccess];
4563 *(uint32_t *)pvData = u32Data;
4564 }
4565
4566 return VINF_VMX_MODIFIES_BEHAVIOR;
4567}
4568
4569
4570/**
4571 * Virtualizes an MSR-based APIC read access.
4572 *
4573 * @returns VBox strict status code.
4574 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR read was virtualized.
4575 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR read access must be
4576 * handled by the x2APIC device.
4577 * @retval VERR_OUT_OF_RANGE if the MSR read was supposed to be virtualized but was
4578 * not within the range of valid MSRs, caller must raise \#GP(0).
4579 * @param pVCpu The cross context virtual CPU structure.
4580 * @param idMsr The x2APIC MSR being read.
4581 * @param pu64Value Where to store the read x2APIC MSR value (only valid when
4582 * VINF_VMX_MODIFIES_BEHAVIOR is returned).
4583 */
4584IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value)
4585{
4586 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4587 Assert(pVmcs);
4588 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE);
4589 Assert(pu64Value);
4590
4591 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4592 {
4593 /*
4594 * Intel has different ideas in the x2APIC spec. vs the VT-x spec. as to
4595 * what the end of the valid x2APIC MSR range is. Hence the use of different
4596 * macros here.
4597 *
4598 * See Intel spec. 10.12.1.2 "x2APIC Register Address Space".
4599 * See Intel spec. 29.5 "Virtualizing MSR-based APIC Accesses".
4600 */
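    /*
     * Illustration of the MSR-to-offset mapping used below: the x2APIC TPR MSR (0x808)
     * maps to ((0x808 & 0xff) << 4) = 0x80, which is the TPR offset in the virtual-APIC page.
     */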
4601 if ( idMsr >= VMX_V_VIRT_APIC_MSR_START
4602 && idMsr <= VMX_V_VIRT_APIC_MSR_END)
4603 {
4604 uint16_t const offReg = (idMsr & 0xff) << 4;
4605 uint64_t const u64Value = iemVmxVirtApicReadRaw64(pVCpu, offReg);
4606 *pu64Value = u64Value;
4607 return VINF_VMX_MODIFIES_BEHAVIOR;
4608 }
4609 return VERR_OUT_OF_RANGE;
4610 }
4611
4612 if (idMsr == MSR_IA32_X2APIC_TPR)
4613 {
4614 uint16_t const offReg = (idMsr & 0xff) << 4;
4615 uint64_t const u64Value = iemVmxVirtApicReadRaw64(pVCpu, offReg);
4616 *pu64Value = u64Value;
4617 return VINF_VMX_MODIFIES_BEHAVIOR;
4618 }
4619
4620 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4621}
4622
4623
4624/**
4625 * Virtualizes an MSR-based APIC write access.
4626 *
4627 * @returns VBox strict status code.
4628 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR write was virtualized.
4629 * @retval VERR_OUT_OF_RANGE if the MSR write was supposed to be virtualized but was
4630 * not within the range of valid MSRs, caller must raise \#GP(0).
4631 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR must be written normally.
4632 *
4633 * @param pVCpu The cross context virtual CPU structure.
4634 * @param idMsr The x2APIC MSR being written.
4635 * @param u64Value The value of the x2APIC MSR being written.
4636 */
4637IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPU pVCpu, uint32_t idMsr, uint64_t u64Value)
4638{
4639 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4640 Assert(pVmcs);
4641
4642 /*
4643 * Check if the access is to be virtualized.
4644 * See Intel spec. 29.5 "Virtualizing MSR-based APIC Accesses".
4645 */
4646 if ( idMsr == MSR_IA32_X2APIC_TPR
4647 || ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4648 && ( idMsr == MSR_IA32_X2APIC_EOI
4649 || idMsr == MSR_IA32_X2APIC_SELF_IPI)))
4650 {
4651 /* Validate the MSR write depending on the register. */
4652 switch (idMsr)
4653 {
4654 case MSR_IA32_X2APIC_TPR:
4655 case MSR_IA32_X2APIC_SELF_IPI:
4656 {
4657 if (u64Value & UINT64_C(0xffffffffffffff00))
4658 return VERR_OUT_OF_RANGE;
4659 break;
4660 }
4661 case MSR_IA32_X2APIC_EOI:
4662 {
4663 if (u64Value != 0)
4664 return VERR_OUT_OF_RANGE;
4665 break;
4666 }
4667 }
4668
4669 /* Write the MSR to the virtual-APIC page. */
4670 uint16_t const offReg = (idMsr & 0xff) << 4;
4671 iemVmxVirtApicWriteRaw64(pVCpu, offReg, u64Value);
4672
4673 /*
4674 * Record the currently updated APIC offset, as we need this later for figuring
4675 * out whether to perform TPR, EOI or self-IPI virtualization, as well as for
4676 * supplying the exit qualification when causing an APIC-write VM-exit.
4677 */
4678 iemVmxVirtApicSetPendingWrite(pVCpu, offReg);
4679
4680 return VINF_VMX_MODIFIES_BEHAVIOR;
4681 }
4682
4683 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4684}
4685
4686
4687/**
4688 * Finds the most significant set bit in a virtual-APIC 256-bit sparse register.
4689 *
4690 * @returns VBox status code.
4691 * @retval VINF_SUCCESS when the highest set bit is found.
4692 * @retval VERR_NOT_FOUND when no bit is set.
4693 *
4694 * @param pVCpu The cross context virtual CPU structure.
4695 * @param offReg The offset of the APIC 256-bit sparse register.
4696 * @param pidxHighestBit Where to store the highest bit (most significant bit)
4697 * set in the register. Only valid when VINF_SUCCESS is
4698 * returned.
4699 *
4700 * @remarks The format of the 256-bit sparse register here mirrors that found in
4701 * real APIC hardware.
4702 */
4703static int iemVmxVirtApicGetHighestSetBitInReg(PVMCPU pVCpu, uint16_t offReg, uint8_t *pidxHighestBit)
4704{
4705 Assert(offReg < XAPIC_OFF_END + 4);
4706 Assert(pidxHighestBit);
4707
4708 /*
4709 * There are 8 contiguous fragments (of 16 bytes each) in the sparse register.
4710 * However, in each fragment only the first 4 bytes are used.
4711 */
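    /*
     * For example, for the ISR (whose first fragment is at offset 0x100) vector 0x41 (65)
     * lives in fragment 2: bit 1 of the 32-bit value read at offset 0x100 + (2 * 16) = 0x120.
     */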
4712 uint8_t const cFrags = 8;
4713 for (int8_t iFrag = cFrags - 1; iFrag >= 0; iFrag--)
4714 {
4715 uint16_t const offFrag = iFrag * 16;
4716 uint32_t const u32Frag = iemVmxVirtApicReadRaw32(pVCpu, offReg + offFrag);
4717 if (!u32Frag)
4718 continue;
4719
4720 unsigned idxHighestBit = ASMBitLastSetU32(u32Frag);
4721 Assert(idxHighestBit > 0);
4722 --idxHighestBit;
4723 Assert(idxHighestBit <= UINT8_MAX);
4724 *pidxHighestBit = idxHighestBit;
4725 return VINF_SUCCESS;
4726 }
4727 return VERR_NOT_FOUND;
4728}
4729
4730
4731/**
4732 * Evaluates pending virtual interrupts.
4733 *
4734 * @param pVCpu The cross context virtual CPU structure.
4735 */
4736IEM_STATIC void iemVmxEvalPendingVirtIntrs(PVMCPU pVCpu)
4737{
4738 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4739 Assert(pVmcs);
4740 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4741
4742 if (!(pVmcs->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4743 {
4744 uint8_t const uRvi = RT_LO_U8(pVmcs->u16GuestIntStatus);
4745 uint8_t const uPpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_PPR);
4746
4747 if ((uRvi >> 4) > (uPpr >> 4))
4748 {
4749 Log2(("eval_virt_intrs: uRvi=%#x uPpr=%#x - Signaling pending interrupt\n", uRvi, uPpr));
4750 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
4751 }
4752 else
4753 Log2(("eval_virt_intrs: uRvi=%#x uPpr=%#x - Nothing to do\n", uRvi, uPpr));
4754 }
4755}
4756
4757
4758/**
4759 * Performs PPR virtualization.
4760 *
4762 * @param pVCpu The cross context virtual CPU structure.
4763 */
4764IEM_STATIC void iemVmxPprVirtualization(PVMCPU pVCpu)
4765{
4766 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4767 Assert(pVmcs);
4768 Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
4769 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4770
4771 /*
4772 * PPR virtualization is caused in response to a VM-entry, TPR-virtualization,
4773 * or EOI-virtualization.
4774 *
4775 * See Intel spec. 29.1.3 "PPR Virtualization".
4776 */
4777 uint32_t const uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
4778 uint32_t const uSvi = RT_HI_U8(pVmcs->u16GuestIntStatus);
4779
4780 uint32_t uPpr;
4781 if (((uTpr >> 4) & 0xf) >= ((uSvi >> 4) & 0xf))
4782 uPpr = uTpr & 0xff;
4783 else
4784 uPpr = uSvi & 0xf0;
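    /*
     * Worked example of the computation above: with uTpr=0x46 and uSvi=0x32, TPR[7:4] (4)
     * >= SVI[7:4] (3), so uPpr becomes 0x46. With uTpr=0x20 and uSvi=0x51, uPpr becomes 0x50.
     */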
4785
4786 Log2(("ppr_virt: uTpr=%#x uSvi=%#x uPpr=%#x\n", uTpr, uSvi, uPpr));
4787 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_PPR, uPpr);
4788}
4789
4790
4791/**
4792 * Performs VMX TPR virtualization.
4793 *
4794 * @returns VBox strict status code.
4795 * @param pVCpu The cross context virtual CPU structure.
4796 */
4797IEM_STATIC VBOXSTRICTRC iemVmxTprVirtualization(PVMCPU pVCpu)
4798{
4799 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4800 Assert(pVmcs);
4801 Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
4802
4803 /*
4804 * We should have already performed the virtual-APIC write to the TPR offset
4805 * in the virtual-APIC page. We now perform TPR virtualization.
4806 *
4807 * See Intel spec. 29.1.2 "TPR Virtualization".
4808 */
4809 if (!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
4810 {
4811 uint32_t const uTprThreshold = pVmcs->u32TprThreshold;
4812 uint32_t const uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
4813
4814 /*
4815 * If the VTPR falls below the TPR threshold, we must cause a VM-exit.
4816 * See Intel spec. 29.1.2 "TPR Virtualization".
4817 */
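        /*
         * For instance, a VTPR of 0x3f has bits 7:4 equal to 3; with a TPR threshold of 4
         * the comparison below (3 < 4) holds and a TPR-below-threshold VM-exit is signalled.
         */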
4818 if (((uTpr >> 4) & 0xf) < uTprThreshold)
4819 {
4820 Log2(("tpr_virt: uTpr=%u uTprThreshold=%u -> VM-exit\n", uTpr, uTprThreshold));
4821 iemVmxVmcsSetExitQual(pVCpu, 0);
4822 return iemVmxVmexit(pVCpu, VMX_EXIT_TPR_BELOW_THRESHOLD);
4823 }
4824 }
4825 else
4826 {
4827 iemVmxPprVirtualization(pVCpu);
4828 iemVmxEvalPendingVirtIntrs(pVCpu);
4829 }
4830
4831 return VINF_SUCCESS;
4832}
4833
4834
4835/**
4836 * Checks whether an EOI write for the given interrupt vector causes a VM-exit or
4837 * not.
4838 *
4839 * @returns @c true if the EOI write is intercepted, @c false otherwise.
4840 * @param pVCpu The cross context virtual CPU structure.
4841 * @param uVector The interrupt that was acknowledged using an EOI.
4842 */
4843IEM_STATIC bool iemVmxIsEoiInterceptSet(PVMCPU pVCpu, uint8_t uVector)
4844{
4845 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4846 Assert(pVmcs);
4847 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4848
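    /*
     * The EOI-exit bitmap covers 256 vectors split across four 64-bit VMCS fields.
     * For example, vector 0x91 (145) is tested against bit (145 - 128) = 17 of the
     * third bitmap (u64EoiExitBitmap2).
     */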
4849 if (uVector < 64)
4850 return RT_BOOL(pVmcs->u64EoiExitBitmap0.u & RT_BIT_64(uVector));
4851 if (uVector < 128)
4852 return RT_BOOL(pVmcs->u64EoiExitBitmap1.u & RT_BIT_64(uVector - 64));
4853 if (uVector < 192)
4854 return RT_BOOL(pVmcs->u64EoiExitBitmap2.u & RT_BIT_64(uVector - 128));
4855 return RT_BOOL(pVmcs->u64EoiExitBitmap3.u & RT_BIT_64(uVector - 192));
4856}
4857
4858
4859/**
4860 * Performs EOI virtualization.
4861 *
4862 * @returns VBox strict status code.
4863 * @param pVCpu The cross context virtual CPU structure.
4864 */
4865IEM_STATIC VBOXSTRICTRC iemVmxEoiVirtualization(PVMCPU pVCpu)
4866{
4867 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4868 Assert(pVmcs);
4869 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4870
4871 /*
4872 * Clear the interrupt from the guest ISR as it is no longer in service,
4873 * and get the next guest interrupt that is in service (if any).
4874 *
4875 * See Intel spec. 29.1.4 "EOI Virtualization".
4876 */
4877 uint8_t const uRvi = RT_LO_U8(pVmcs->u16GuestIntStatus);
4878 uint8_t const uSvi = RT_HI_U8(pVmcs->u16GuestIntStatus);
4879 Log2(("eoi_virt: uRvi=%#x uSvi=%#x\n", uRvi, uSvi));
4880
4881 uint8_t uVector = uSvi;
4882 iemVmxVirtApicClearVector(pVCpu, XAPIC_OFF_ISR0, uVector);
4883
4884 uVector = 0;
4885 iemVmxVirtApicGetHighestSetBitInReg(pVCpu, XAPIC_OFF_ISR0, &uVector);
4886
4887 if (uVector)
4888 Log2(("eoi_virt: next interrupt %#x\n", uVector));
4889 else
4890 Log2(("eoi_virt: no interrupt pending in ISR\n"));
4891
4892 /* Update guest-interrupt status SVI (leave RVI portion as it is) in the VMCS. */
4893 pVmcs->u16GuestIntStatus = RT_MAKE_U16(uRvi, uVector);
4894
4895 iemVmxPprVirtualization(pVCpu);
4896 if (iemVmxIsEoiInterceptSet(pVCpu, uVector))
4897 return iemVmxVmexitVirtEoi(pVCpu, uVector);
4898 iemVmxEvalPendingVirtIntrs(pVCpu);
4899 return VINF_SUCCESS;
4900}
4901
4902
4903/**
4904 * Performs self-IPI virtualization.
4905 *
4906 * @returns VBox strict status code.
4907 * @param pVCpu The cross context virtual CPU structure.
4908 */
4909IEM_STATIC VBOXSTRICTRC iemVmxSelfIpiVirtualization(PVMCPU pVCpu)
4910{
4911 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4912 Assert(pVmcs);
4913 Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
4914
4915 /*
4916 * We should have already performed the virtual-APIC write to the self-IPI offset
4917 * in the virtual-APIC page. We now perform self-IPI virtualization.
4918 *
4919 * See Intel spec. 29.1.5 "Self-IPI Virtualization".
4920 */
4921 uint8_t const uVector = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_ICR_LO);
4922 Log2(("self_ipi_virt: uVector=%#x\n", uVector));
4923 iemVmxVirtApicSetVector(pVCpu, XAPIC_OFF_IRR0, uVector);
4924 uint8_t const uRvi = RT_LO_U8(pVmcs->u16GuestIntStatus);
4925 uint8_t const uSvi = RT_HI_U8(pVmcs->u16GuestIntStatus);
4926 if (uVector > uRvi)
4927 pVmcs->u16GuestIntStatus = RT_MAKE_U16(uVector, uSvi);
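    /*
     * For example, a self-IPI with vector 0x50 while RVI is 0x30 raises RVI to 0x50 and
     * leaves SVI untouched; the pending-interrupt evaluation below may then signal it.
     */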
4928 iemVmxEvalPendingVirtIntrs(pVCpu);
4929 return VINF_SUCCESS;
4930}
4931
4932
4933/**
4934 * Performs VMX APIC-write emulation.
4935 *
4936 * @returns VBox strict status code.
4937 * @param pVCpu The cross context virtual CPU structure.
4938 */
4939IEM_STATIC VBOXSTRICTRC iemVmxApicWriteEmulation(PVMCPU pVCpu)
4940{
4941 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4942 Assert(pVmcs);
4943
4944 /*
4945 * Perform APIC-write emulation based on the virtual-APIC register written.
4946 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4947 */
4948 uint16_t const offApicWrite = iemVmxVirtApicClearPendingWrite(pVCpu);
4949 VBOXSTRICTRC rcStrict;
4950 switch (offApicWrite)
4951 {
4952 case XAPIC_OFF_TPR:
4953 {
4954 /* Clear bytes 3:1 of the VTPR and perform TPR virtualization. */
4955 uint32_t uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
4956 uTpr &= UINT32_C(0x000000ff);
4957 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_TPR, uTpr);
4958 Log2(("iemVmxApicWriteEmulation: TPR write %#x\n", uTpr));
4959 rcStrict = iemVmxTprVirtualization(pVCpu);
4960 break;
4961 }
4962
4963 case XAPIC_OFF_EOI:
4964 {
4965 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4966 {
4967 /* Clear VEOI and perform EOI virtualization. */
4968 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_EOI, 0);
4969 Log2(("iemVmxApicWriteEmulation: EOI write\n"));
4970 rcStrict = iemVmxEoiVirtualization(pVCpu);
4971 }
4972 else
4973 rcStrict = iemVmxVmexitApicWrite(pVCpu, offApicWrite);
4974 break;
4975 }
4976
4977 case XAPIC_OFF_ICR_LO:
4978 {
4979 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4980 {
4981 /* If the ICR_LO is valid, write it and perform self-IPI virtualization. */
4982 uint32_t const uIcrLo = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_ICR_LO);
4983 uint32_t const fIcrLoMb0 = UINT32_C(0xfffbb700);
4984 uint32_t const fIcrLoMb1 = UINT32_C(0x000000f0);
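            /*
             * A sketch of what the masks above accept: a value such as 0x00040030 (vector 0x30,
             * fixed delivery mode, self destination shorthand) has no bits in fIcrLoMb0 set and
             * a non-zero vector nibble in fIcrLoMb1, so it is treated as a valid self-IPI request.
             * A value using NMI delivery mode (bits 10:8 = 100b) intersects fIcrLoMb0 and instead
             * leads to an APIC-write VM-exit below.
             */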
4985 if ( !(uIcrLo & fIcrLoMb0)
4986 && (uIcrLo & fIcrLoMb1))
4987 {
4988 Log2(("iemVmxApicWriteEmulation: Self-IPI virtualization with vector %#x\n", (uIcrLo & 0xff)));
4989 rcStrict = iemVmxSelfIpiVirtualization(pVCpu);
4990 }
4991 else
4992 rcStrict = iemVmxVmexitApicWrite(pVCpu, offApicWrite);
4993 }
4994 else
4995 rcStrict = iemVmxVmexitApicWrite(pVCpu, offApicWrite);
4996 break;
4997 }
4998
4999 case XAPIC_OFF_ICR_HI:
5000 {
5001 /* Clear bytes 2:0 of VICR_HI. No other virtualization or VM-exit must occur. */
5002 uint32_t uIcrHi = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_ICR_HI);
5003 uIcrHi &= UINT32_C(0xff000000);
5004 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_ICR_HI, uIcrHi);
5005 rcStrict = VINF_SUCCESS;
5006 break;
5007 }
5008
5009 default:
5010 {
5011 /* Writes to any other virtual-APIC register causes an APIC-write VM-exit. */
5012 rcStrict = iemVmxVmexitApicWrite(pVCpu, offApicWrite);
5013 break;
5014 }
5015 }
5016
5017 return rcStrict;
5018}
5019
5020
5021/**
5022 * Checks guest control registers, debug registers and MSRs as part of VM-entry.
5023 *
5024 * @param pVCpu The cross context virtual CPU structure.
5025 * @param pszInstr The VMX instruction name (for logging purposes).
5026 */
5027IEM_STATIC int iemVmxVmentryCheckGuestControlRegsMsrs(PVMCPU pVCpu, const char *pszInstr)
5028{
5029 /*
5030 * Guest Control Registers, Debug Registers, and MSRs.
5031 * See Intel spec. 26.3.1.1 "Checks on Guest Control Registers, Debug Registers, and MSRs".
5032 */
5033 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5034 const char *const pszFailure = "VM-exit";
5035 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
5036
5037 /* CR0 reserved bits. */
5038 {
5039 /* CR0 MB1 bits. */
5040 uint64_t u64Cr0Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
5041 Assert(!(u64Cr0Fixed0 & (X86_CR0_NW | X86_CR0_CD)));
5042 if (fUnrestrictedGuest)
5043 u64Cr0Fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
5044 if ((pVmcs->u64GuestCr0.u & u64Cr0Fixed0) != u64Cr0Fixed0)
5045 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed0);
5046
5047 /* CR0 MBZ bits. */
5048 uint64_t const u64Cr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
5049 if (pVmcs->u64GuestCr0.u & ~u64Cr0Fixed1)
5050 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed1);
5051
5052 /* Without unrestricted-guest support, VT-x does not support unpaged protected mode. */
5053 if ( !fUnrestrictedGuest
5054 && (pVmcs->u64GuestCr0.u & X86_CR0_PG)
5055 && !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
5056 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0PgPe);
5057 }
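    /*
     * Illustration (values are typical, not taken from this code): with a CR0 fixed-0 MSR of
     * 0x80000021 (PE, NE, PG) and a fixed-1 MSR of 0xffffffff, a guest CR0 of 0x80000031 passes
     * both checks above, while a guest CR0 with PE clear fails the fixed-0 check unless the
     * unrestricted-guest control is set.
     */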
5058
5059 /* CR4 reserved bits. */
5060 {
5061 /* CR4 MB1 bits. */
5062 uint64_t const u64Cr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
5063 if ((pVmcs->u64GuestCr4.u & u64Cr4Fixed0) != u64Cr4Fixed0)
5064 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed0);
5065
5066 /* CR4 MBZ bits. */
5067 uint64_t const u64Cr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
5068 if (pVmcs->u64GuestCr4.u & ~u64Cr4Fixed1)
5069 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed1);
5070 }
5071
5072 /* DEBUGCTL MSR. */
5073 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5074 && (pVmcs->u64GuestDebugCtlMsr.u & ~MSR_IA32_DEBUGCTL_VALID_MASK_INTEL))
5075 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDebugCtl);
5076
5077 /* 64-bit CPU checks. */
5078 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5079 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5080 {
5081 if (fGstInLongMode)
5082 {
5083 /* PAE must be set. */
5084 if ( (pVmcs->u64GuestCr0.u & X86_CR0_PG)
5085 && (pVmcs->u64GuestCr4.u & X86_CR4_PAE))
5086 { /* likely */ }
5087 else
5088 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPae);
5089 }
5090 else
5091 {
5092 /* PCIDE should not be set. */
5093 if (!(pVmcs->u64GuestCr4.u & X86_CR4_PCIDE))
5094 { /* likely */ }
5095 else
5096 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPcide);
5097 }
5098
5099 /* CR3. */
5100 if (!(pVmcs->u64GuestCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
5101 { /* likely */ }
5102 else
5103 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr3);
5104
5105 /* DR7. */
5106 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5107 && (pVmcs->u64GuestDr7.u & X86_DR7_MBZ_MASK))
5108 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDr7);
5109
5110 /* SYSENTER ESP and SYSENTER EIP. */
5111 if ( X86_IS_CANONICAL(pVmcs->u64GuestSysenterEsp.u)
5112 && X86_IS_CANONICAL(pVmcs->u64GuestSysenterEip.u))
5113 { /* likely */ }
5114 else
5115 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSysenterEspEip);
5116 }
5117
5118 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
5119 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR));
5120
5121 /* PAT MSR. */
5122 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5123 && !CPUMIsPatMsrValid(pVmcs->u64GuestPatMsr.u))
5124 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPatMsr);
5125
5126 /* EFER MSR. */
5127 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
5128 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5129 && (pVmcs->u64GuestEferMsr.u & ~uValidEferMask))
5130 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsrRsvd);
5131
5132 bool const fGstLma = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LMA);
5133 bool const fGstLme = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LME);
5134 if ( fGstInLongMode == fGstLma
5135 && ( !(pVmcs->u64GuestCr0.u & X86_CR0_PG)
5136 || fGstLma == fGstLme))
5137 { /* likely */ }
5138 else
5139 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsr);
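    /*
     * Example of the consistency requirement above: with the "IA-32e mode guest" entry control
     * set, guest EFER.LMA must also be set, and when guest CR0.PG is set EFER.LME must match
     * EFER.LMA; e.g. LMA=1, LME=1, CR0.PG=1 passes, whereas LMA=0 with the control set fails.
     */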
5140
5141 /* We don't support IA32_BNDCFGS MSR yet. */
5142 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR));
5143
5144 NOREF(pszInstr);
5145 NOREF(pszFailure);
5146 return VINF_SUCCESS;
5147}
5148
5149
5150/**
5151 * Checks guest segment registers, LDTR and TR as part of VM-entry.
5152 *
5153 * @param pVCpu The cross context virtual CPU structure.
5154 * @param pszInstr The VMX instruction name (for logging purposes).
5155 */
5156IEM_STATIC int iemVmxVmentryCheckGuestSegRegs(PVMCPU pVCpu, const char *pszInstr)
5157{
5158 /*
5159 * Segment registers.
5160 * See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
5161 */
5162 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5163 const char *const pszFailure = "VM-exit";
5164 bool const fGstInV86Mode = RT_BOOL(pVmcs->u64GuestRFlags.u & X86_EFL_VM);
5165 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
5166 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5167
5168 /* Selectors. */
5169 if ( !fGstInV86Mode
5170 && !fUnrestrictedGuest
5171 && (pVmcs->GuestSs & X86_SEL_RPL) != (pVmcs->GuestCs & X86_SEL_RPL))
5172 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelCsSsRpl);
5173
5174 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
5175 {
5176 CPUMSELREG SelReg;
5177 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &SelReg);
5178 if (RT_LIKELY(rc == VINF_SUCCESS))
5179 { /* likely */ }
5180 else
5181 return rc;
5182
5183 /*
5184 * Virtual-8086 mode checks.
5185 */
5186 if (fGstInV86Mode)
5187 {
5188 /* Base address. */
5189 if (SelReg.u64Base == (uint64_t)SelReg.Sel << 4)
5190 { /* likely */ }
5191 else
5192 {
5193 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBaseV86(iSegReg);
5194 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5195 }
5196
5197 /* Limit. */
5198 if (SelReg.u32Limit == 0xffff)
5199 { /* likely */ }
5200 else
5201 {
5202 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegLimitV86(iSegReg);
5203 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5204 }
5205
5206 /* Attribute. */
5207 if (SelReg.Attr.u == 0xf3)
5208 { /* likely */ }
5209 else
5210 {
5211 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrV86(iSegReg);
5212 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5213 }
5214
5215 /* We're done; move to checking the next segment. */
5216 continue;
5217 }
5218
5219 /* Checks done by 64-bit CPUs. */
5220 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5221 {
5222 /* Base address. */
5223 if ( iSegReg == X86_SREG_FS
5224 || iSegReg == X86_SREG_GS)
5225 {
5226 if (X86_IS_CANONICAL(SelReg.u64Base))
5227 { /* likely */ }
5228 else
5229 {
5230 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
5231 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5232 }
5233 }
5234 else if (iSegReg == X86_SREG_CS)
5235 {
5236 if (!RT_HI_U32(SelReg.u64Base))
5237 { /* likely */ }
5238 else
5239 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseCs);
5240 }
5241 else
5242 {
5243 if ( SelReg.Attr.n.u1Unusable
5244 || !RT_HI_U32(SelReg.u64Base))
5245 { /* likely */ }
5246 else
5247 {
5248 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
5249 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5250 }
5251 }
5252 }
5253
5254 /*
5255 * Checks outside Virtual-8086 mode.
5256 */
5257 uint8_t const uSegType = SelReg.Attr.n.u4Type;
5258 uint8_t const fCodeDataSeg = SelReg.Attr.n.u1DescType;
5259 uint8_t const fUsable = !SelReg.Attr.n.u1Unusable;
5260 uint8_t const uDpl = SelReg.Attr.n.u2Dpl;
5261 uint8_t const fPresent = SelReg.Attr.n.u1Present;
5262 uint8_t const uGranularity = SelReg.Attr.n.u1Granularity;
5263 uint8_t const uDefBig = SelReg.Attr.n.u1DefBig;
5264 uint8_t const fSegLong = SelReg.Attr.n.u1Long;
5265
5266 /* Code or usable segment. */
5267 if ( iSegReg == X86_SREG_CS
5268 || fUsable)
5269 {
5270 /* Reserved bits (bits 31:17 and bits 11:8). */
5271 if (!(SelReg.Attr.u & 0xfffe0f00))
5272 { /* likely */ }
5273 else
5274 {
5275 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrRsvd(iSegReg);
5276 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5277 }
5278
5279 /* Descriptor type. */
5280 if (fCodeDataSeg)
5281 { /* likely */ }
5282 else
5283 {
5284 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDescType(iSegReg);
5285 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5286 }
5287
5288 /* Present. */
5289 if (fPresent)
5290 { /* likely */ }
5291 else
5292 {
5293 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrPresent(iSegReg);
5294 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5295 }
5296
5297 /* Granularity. */
5298 if ( ((SelReg.u32Limit & 0x00000fff) == 0x00000fff || !uGranularity)
5299 && ((SelReg.u32Limit & 0xfff00000) == 0x00000000 || uGranularity))
5300 { /* likely */ }
5301 else
5302 {
5303 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrGran(iSegReg);
5304 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5305 }
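            /*
             * To illustrate the granularity rule: a limit of 0xffffffff requires G=1, a limit
             * of 0x0000ffff is acceptable with either G setting, and a limit such as
             * 0x00100000 can never be valid since the two clauses above then contradict.
             */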
5306 }
5307
5308 if (iSegReg == X86_SREG_CS)
5309 {
5310 /* Segment Type and DPL. */
5311 if ( uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
5312 && fUnrestrictedGuest)
5313 {
5314 if (uDpl == 0)
5315 { /* likely */ }
5316 else
5317 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplZero);
5318 }
5319 else if ( uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_ACCESSED)
5320 || uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
5321 {
5322 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
5323 if (uDpl == AttrSs.n.u2Dpl)
5324 { /* likely */ }
5325 else
5326 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplEqSs);
5327 }
5328 else if ((uSegType & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
5329 == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
5330 {
5331 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
5332 if (uDpl <= AttrSs.n.u2Dpl)
5333 { /* likely */ }
5334 else
5335 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplLtSs);
5336 }
5337 else
5338 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsType);
5339
5340 /* Def/Big. */
5341 if ( fGstInLongMode
5342 && fSegLong)
5343 {
5344 if (uDefBig == 0)
5345 { /* likely */ }
5346 else
5347 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDefBig);
5348 }
5349 }
5350 else if (iSegReg == X86_SREG_SS)
5351 {
5352 /* Segment Type. */
5353 if ( !fUsable
5354 || uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
5355 || uSegType == (X86_SEL_TYPE_DOWN | X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED))
5356 { /* likely */ }
5357 else
5358 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsType);
5359
5360 /* DPL. */
5361 if (!fUnrestrictedGuest)
5362 {
5363 if (uDpl == (SelReg.Sel & X86_SEL_RPL))
5364 { /* likely */ }
5365 else
5366 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplEqRpl);
5367 }
5368 X86DESCATTR AttrCs; AttrCs.u = pVmcs->u32GuestCsAttr;
5369 if ( AttrCs.n.u4Type == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
5370 || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
5371 {
5372 if (uDpl == 0)
5373 { /* likely */ }
5374 else
5375 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplZero);
5376 }
5377 }
5378 else
5379 {
5380 /* DS, ES, FS, GS. */
5381 if (fUsable)
5382 {
5383 /* Segment type. */
5384 if (uSegType & X86_SEL_TYPE_ACCESSED)
5385 { /* likely */ }
5386 else
5387 {
5388 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrTypeAcc(iSegReg);
5389 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5390 }
5391
5392 if ( !(uSegType & X86_SEL_TYPE_CODE)
5393 || (uSegType & X86_SEL_TYPE_READ))
5394 { /* likely */ }
5395 else
5396 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsTypeRead);
5397
5398 /* DPL. */
5399 if ( !fUnrestrictedGuest
5400 && uSegType <= (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
5401 {
5402 if (uDpl >= (SelReg.Sel & X86_SEL_RPL))
5403 { /* likely */ }
5404 else
5405 {
5406 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDplRpl(iSegReg);
5407 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5408 }
5409 }
5410 }
5411 }
5412 }
5413
5414 /*
5415 * LDTR.
5416 */
5417 {
5418 CPUMSELREG Ldtr;
5419 Ldtr.Sel = pVmcs->GuestLdtr;
5420 Ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
5421 Ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
5422 Ldtr.Attr.u = pVmcs->u32GuestLdtrAttr;
5423
5424 if (!Ldtr.Attr.n.u1Unusable)
5425 {
5426 /* Selector. */
5427 if (!(Ldtr.Sel & X86_SEL_LDT))
5428 { /* likely */ }
5429 else
5430 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelLdtr);
5431
5432 /* Base. */
5433 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5434 {
5435 if (X86_IS_CANONICAL(Ldtr.u64Base))
5436 { /* likely */ }
5437 else
5438 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseLdtr);
5439 }
5440
5441 /* Attributes. */
5442 /* Reserved bits (bits 31:17 and bits 11:8). */
5443 if (!(Ldtr.Attr.u & 0xfffe0f00))
5444 { /* likely */ }
5445 else
5446 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrRsvd);
5447
5448 if (Ldtr.Attr.n.u4Type == X86_SEL_TYPE_SYS_LDT)
5449 { /* likely */ }
5450 else
5451 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrType);
5452
5453 if (!Ldtr.Attr.n.u1DescType)
5454 { /* likely */ }
5455 else
5456 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrDescType);
5457
5458 if (Ldtr.Attr.n.u1Present)
5459 { /* likely */ }
5460 else
5461 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrPresent);
5462
5463 if ( ((Ldtr.u32Limit & 0x00000fff) == 0x00000fff || !Ldtr.Attr.n.u1Granularity)
5464 && ((Ldtr.u32Limit & 0xfff00000) == 0x00000000 || Ldtr.Attr.n.u1Granularity))
5465 { /* likely */ }
5466 else
5467 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrGran);
5468 }
5469 }
5470
5471 /*
5472 * TR.
5473 */
5474 {
5475 CPUMSELREG Tr;
5476 Tr.Sel = pVmcs->GuestTr;
5477 Tr.u32Limit = pVmcs->u32GuestTrLimit;
5478 Tr.u64Base = pVmcs->u64GuestTrBase.u;
5479 Tr.Attr.u = pVmcs->u32GuestTrAttr;
5480
5481 /* Selector. */
5482 if (!(Tr.Sel & X86_SEL_LDT))
5483 { /* likely */ }
5484 else
5485 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelTr);
5486
5487 /* Base. */
5488 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5489 {
5490 if (X86_IS_CANONICAL(Tr.u64Base))
5491 { /* likely */ }
5492 else
5493 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseTr);
5494 }
5495
5496 /* Attributes. */
5497 /* Reserved bits (bits 31:17 and bits 11:8). */
5498 if (!(Tr.Attr.u & 0xfffe0f00))
5499 { /* likely */ }
5500 else
5501 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrRsvd);
5502
5503 if (!Tr.Attr.n.u1Unusable)
5504 { /* likely */ }
5505 else
5506 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrUnusable);
5507
5508 if ( Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY
5509 || ( !fGstInLongMode
5510 && Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY))
5511 { /* likely */ }
5512 else
5513 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrType);
5514
5515 if (!Tr.Attr.n.u1DescType)
5516 { /* likely */ }
5517 else
5518 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrDescType);
5519
5520 if (Tr.Attr.n.u1Present)
5521 { /* likely */ }
5522 else
5523 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrPresent);
5524
5525 if ( ((Tr.u32Limit & 0x00000fff) == 0x00000fff || !Tr.Attr.n.u1Granularity)
5526 && ((Tr.u32Limit & 0xfff00000) == 0x00000000 || Tr.Attr.n.u1Granularity))
5527 { /* likely */ }
5528 else
5529 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrGran);
5530 }
5531
5532 NOREF(pszInstr);
5533 NOREF(pszFailure);
5534 return VINF_SUCCESS;
5535}
5536
5537
5538/**
5539 * Checks guest GDTR and IDTR as part of VM-entry.
5540 *
5541 * @param pVCpu The cross context virtual CPU structure.
5542 * @param pszInstr The VMX instruction name (for logging purposes).
5543 */
5544IEM_STATIC int iemVmxVmentryCheckGuestGdtrIdtr(PVMCPU pVCpu, const char *pszInstr)
5545{
5546 /*
5547 * GDTR and IDTR.
5548 * See Intel spec. 26.3.1.3 "Checks on Guest Descriptor-Table Registers".
5549 */
5550 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5551 const char *const pszFailure = "VM-exit";
5552
5553 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5554 {
5555 /* Base. */
5556 if (X86_IS_CANONICAL(pVmcs->u64GuestGdtrBase.u))
5557 { /* likely */ }
5558 else
5559 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrBase);
5560
5561 if (X86_IS_CANONICAL(pVmcs->u64GuestIdtrBase.u))
5562 { /* likely */ }
5563 else
5564 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrBase);
5565 }
5566
5567 /* Limit. */
5568 if (!RT_HI_U16(pVmcs->u32GuestGdtrLimit))
5569 { /* likely */ }
5570 else
5571 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrLimit);
5572
5573 if (!RT_HI_U16(pVmcs->u32GuestIdtrLimit))
5574 { /* likely */ }
5575 else
5576 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrLimit);
5577
5578 NOREF(pszInstr);
5579 NOREF(pszFailure);
5580 return VINF_SUCCESS;
5581}
5582
5583
5584/**
5585 * Checks guest RIP and RFLAGS as part of VM-entry.
5586 *
5587 * @param pVCpu The cross context virtual CPU structure.
5588 * @param pszInstr The VMX instruction name (for logging purposes).
5589 */
5590IEM_STATIC int iemVmxVmentryCheckGuestRipRFlags(PVMCPU pVCpu, const char *pszInstr)
5591{
5592 /*
5593 * RIP and RFLAGS.
5594 * See Intel spec. 26.3.1.4 "Checks on Guest RIP and RFLAGS".
5595 */
5596 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5597 const char *const pszFailure = "VM-exit";
5598 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5599
5600 /* RIP. */
5601 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5602 {
5603 X86DESCATTR AttrCs; AttrCs.u = pVmcs->u32GuestCsAttr;
5604 if ( !fGstInLongMode
5605 || !AttrCs.n.u1Long)
5606 {
5607 if (!RT_HI_U32(pVmcs->u64GuestRip.u))
5608 { /* likely */ }
5609 else
5610 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRipRsvd);
5611 }
5612
5613 if ( fGstInLongMode
5614 && AttrCs.n.u1Long)
5615 {
5616 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth == 48); /* Canonical. */
5617 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth < 64
5618 && X86_IS_CANONICAL(pVmcs->u64GuestRip.u))
5619 { /* likely */ }
5620 else
5621 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRip);
5622 }
5623 }
5624
5625 /* RFLAGS (bits 63:22 (or 31:22), bits 15, 5, 3 are reserved, bit 1 MB1). */
5626 uint64_t const uGuestRFlags = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode ? pVmcs->u64GuestRFlags.u
5627 : pVmcs->u64GuestRFlags.s.Lo;
5628 if ( !(uGuestRFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK))
5629 && (uGuestRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK)
5630 { /* likely */ }
5631 else
5632 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsRsvd);
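    /*
     * For example, a guest RFLAGS value of 0x00000002 (only the reserved must-be-one bit set)
     * satisfies the check above, while 0x00000000 fails it because X86_EFL_RA1_MASK (bit 1)
     * is not set.
     */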
5633
5634 if ( fGstInLongMode
5635 || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
5636 {
5637 if (!(uGuestRFlags & X86_EFL_VM))
5638 { /* likely */ }
5639 else
5640 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsVm);
5641 }
5642
5643 if ( VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo)
5644 && VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo) == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5645 {
5646 if (uGuestRFlags & X86_EFL_IF)
5647 { /* likely */ }
5648 else
5649 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsIf);
5650 }
5651
5652 NOREF(pszInstr);
5653 NOREF(pszFailure);
5654 return VINF_SUCCESS;
5655}
5656
5657
5658/**
5659 * Checks guest non-register state as part of VM-entry.
5660 *
5661 * @param pVCpu The cross context virtual CPU structure.
5662 * @param pszInstr The VMX instruction name (for logging purposes).
5663 */
5664IEM_STATIC int iemVmxVmentryCheckGuestNonRegState(PVMCPU pVCpu, const char *pszInstr)
5665{
5666 /*
5667 * Guest non-register state.
5668 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
5669 */
5670 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5671 const char *const pszFailure = "VM-exit";
5672
5673 /*
5674 * Activity state.
5675 */
5676 uint64_t const u64GuestVmxMiscMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Misc;
5677 uint32_t const fActivityStateMask = RT_BF_GET(u64GuestVmxMiscMsr, VMX_BF_MISC_ACTIVITY_STATES);
5678 if (!(pVmcs->u32GuestActivityState & fActivityStateMask))
5679 { /* likely */ }
5680 else
5681 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateRsvd);
5682
5683 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
5684 if ( !AttrSs.n.u2Dpl
5685 || pVmcs->u32GuestActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT)
5686 { /* likely */ }
5687 else
5688 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateSsDpl);
5689
5690 if ( pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI
5691 || pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5692 {
5693 if (pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE)
5694 { /* likely */ }
5695 else
5696 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateStiMovSs);
5697 }
5698
5699 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
5700 {
5701 uint8_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
5702 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(pVmcs->u32EntryIntInfo);
5703 AssertCompile(VMX_V_GUEST_ACTIVITY_STATE_MASK == (VMX_VMCS_GUEST_ACTIVITY_HLT | VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN));
5704 switch (pVmcs->u32GuestActivityState)
5705 {
5706 case VMX_VMCS_GUEST_ACTIVITY_HLT:
5707 {
5708 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT
5709 || uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI
5710 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
5711 && ( uVector == X86_XCPT_DB
5712 || uVector == X86_XCPT_MC))
5713 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT
5714 && uVector == VMX_ENTRY_INT_INFO_VECTOR_MTF))
5715 { /* likely */ }
5716 else
5717 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateHlt);
5718 break;
5719 }
5720
5721 case VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN:
5722 {
5723 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI
5724 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
5725 && uVector == X86_XCPT_MC))
5726 { /* likely */ }
5727 else
5728 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateShutdown);
5729 break;
5730 }
5731
5732 case VMX_VMCS_GUEST_ACTIVITY_ACTIVE:
5733 default:
5734 break;
5735 }
5736 }
5737
5738 /*
5739 * Interruptibility state.
5740 */
5741 if (!(pVmcs->u32GuestIntrState & ~VMX_VMCS_GUEST_INT_STATE_MASK))
5742 { /* likely */ }
5743 else
5744 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRsvd);
5745
5746 if ((pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5747 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5748 { /* likely */ }
5749 else
5750 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateStiMovSs);
5751
5752 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_IF)
5753 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5754 { /* likely */ }
5755 else
5756 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRFlagsSti);
5757
5758 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
5759 {
5760 uint8_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
5761 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5762 {
5763 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
5764 { /* likely */ }
5765 else
5766 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateExtInt);
5767 }
5768 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5769 {
5770 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
5771 { /* likely */ }
5772 else
5773 {
5774 /*
5775 * We don't support injecting NMIs when blocking-by-STI would be in effect.
5776 * We update the VM-exit qualification only when blocking-by-STI is set
5777 * without blocking-by-MovSS being set. In practice it does not make much
5778 * difference, since the order of checks is implementation defined.
5779 */
5780 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5781 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_NMI_INJECT);
5782 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateNmi);
5783 }
5784
5785 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5786 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI))
5787 { /* likely */ }
5788 else
5789 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateVirtNmi);
5790 }
5791 }
5792
5793 /* We don't support SMM yet. So blocking-by-SMIs must not be set. */
5794 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI))
5795 { /* likely */ }
5796 else
5797 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateSmi);
5798
5799 /* We don't support SGX yet. So enclave-interruption must not be set. */
5800 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_ENCLAVE))
5801 { /* likely */ }
5802 else
5803 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateEnclave);
5804
5805 /*
5806 * Pending debug exceptions.
5807 */
5808 uint64_t const uPendingDbgXcpt = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode
5809 ? pVmcs->u64GuestPendingDbgXcpt.u
5810 : pVmcs->u64GuestPendingDbgXcpt.s.Lo;
5811 if (!(uPendingDbgXcpt & ~VMX_VMCS_GUEST_PENDING_DEBUG_VALID_MASK))
5812 { /* likely */ }
5813 else
5814 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRsvd);
5815
5816 if ( (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5817 || pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5818 {
5819 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_TF)
5820 && !(pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF)
5821 && !(uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
5822 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsTf);
5823
5824 if ( ( !(pVmcs->u64GuestRFlags.u & X86_EFL_TF)
5825 || (pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF))
5826 && (uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
5827 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsNoTf);
5828 }
5829
5830 /* We don't support RTM (Real-time Transactional Memory) yet. */
5831 if (uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_RTM)
5832 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRtm);
5833
5834 /*
5835 * VMCS link pointer.
5836 */
5837 if (pVmcs->u64VmcsLinkPtr.u != UINT64_C(0xffffffffffffffff))
5838 {
5839 RTGCPHYS const GCPhysShadowVmcs = pVmcs->u64VmcsLinkPtr.u;
5840 /* We don't support SMM yet (so VMCS link pointer cannot be the current VMCS). */
5841 if (GCPhysShadowVmcs != IEM_VMX_GET_CURRENT_VMCS(pVCpu))
5842 { /* likely */ }
5843 else
5844 {
5845 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5846 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrCurVmcs);
5847 }
5848
5849 /* Validate the address. */
5850 if ( (GCPhysShadowVmcs & X86_PAGE_4K_OFFSET_MASK)
5851 || (GCPhysShadowVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5852 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysShadowVmcs))
5853 {
5854 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5855 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmcsLinkPtr);
5856 }
5857
5858 /* Read the VMCS-link pointer from guest memory. */
5859 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs));
5860 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs),
5861 GCPhysShadowVmcs, VMX_V_VMCS_SIZE);
5862 if (RT_FAILURE(rc))
5863 {
5864 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5865 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrReadPhys);
5866 }
5867
5868 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
5869 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.u31RevisionId == VMX_V_VMCS_REVISION_ID)
5870 { /* likely */ }
5871 else
5872 {
5873 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5874 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrRevId);
5875 }
5876
5877 /* Verify the shadow bit is set if VMCS shadowing is enabled. */
5878 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
5879 || pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.fIsShadowVmcs)
5880 { /* likely */ }
5881 else
5882 {
5883 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5884 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrShadow);
5885 }
5886
5887 /* Finally update our cache of the guest physical address of the shadow VMCS. */
5888 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs = GCPhysShadowVmcs;
5889 }
5890
5891 NOREF(pszInstr);
5892 NOREF(pszFailure);
5893 return VINF_SUCCESS;
5894}
5895
5896
5897/**
5898 * Checks if the PDPTEs referenced by the nested-guest CR3 are valid as part of
5899 * VM-entry.
5900 *
5901 * @returns VBox status code.
5902 * @param pVCpu The cross context virtual CPU structure.
5903 * @param pszInstr The VMX instruction name (for logging purposes).
5904 * @param pVmcs Pointer to the virtual VMCS.
5905 */
5906IEM_STATIC int iemVmxVmentryCheckGuestPdptesForCr3(PVMCPU pVCpu, const char *pszInstr, PVMXVVMCS pVmcs)
5907{
5908 /*
5909 * Check PDPTEs.
5910 * See Intel spec. 4.4.1 "PDPTE Registers".
5911 */
5912 uint64_t const uGuestCr3 = pVmcs->u64GuestCr3.u & X86_CR3_PAE_PAGE_MASK;
5913 const char *const pszFailure = "VM-exit";
5914
5915 X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
5916 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uGuestCr3, sizeof(aPdptes));
5917 if (RT_SUCCESS(rc))
5918 {
5919 for (unsigned iPdpte = 0; iPdpte < RT_ELEMENTS(aPdptes); iPdpte++)
5920 {
5921 if ( !(aPdptes[iPdpte].u & X86_PDPE_P)
5922 || !(aPdptes[iPdpte].u & X86_PDPE_PAE_MBZ_MASK))
5923 { /* likely */ }
5924 else
5925 {
5926 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
5927 VMXVDIAG const enmDiag = iemVmxGetDiagVmentryPdpteRsvd(iPdpte);
5928 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5929 }
5930 }
5931 }
5932 else
5933 {
5934 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
5935 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpteCr3ReadPhys);
5936 }
5937
5938 NOREF(pszFailure);
5939 NOREF(pszInstr);
5940 return rc;
5941}
5942
5943
5944/**
5945 * Checks guest PDPTEs as part of VM-entry.
5946 *
5947 * @param pVCpu The cross context virtual CPU structure.
5948 * @param pszInstr The VMX instruction name (for logging purposes).
5949 */
5950IEM_STATIC int iemVmxVmentryCheckGuestPdptes(PVMCPU pVCpu, const char *pszInstr)
5951{
5952 /*
5953 * Guest PDPTEs.
5954 * See Intel spec. 26.3.1.5 "Checks on Guest Page-Directory-Pointer-Table Entries".
5955 */
5956 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5957 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5958
5959 /* Check PDPTEs if the VM-entry is to a guest using PAE paging. */
5960 int rc;
5961 if ( !fGstInLongMode
5962 && (pVmcs->u64GuestCr4.u & X86_CR4_PAE)
5963 && (pVmcs->u64GuestCr0.u & X86_CR0_PG))
5964 {
5965 /*
5966 * We don't support nested-paging for nested-guests yet.
5967 *
5968 * Without nested-paging for nested-guests, PDPTEs in the VMCS are not used,
5969 * rather we need to check the PDPTEs referenced by the guest CR3.
5970 */
5971 rc = iemVmxVmentryCheckGuestPdptesForCr3(pVCpu, pszInstr, pVmcs);
5972 }
5973 else
5974 rc = VINF_SUCCESS;
5975 return rc;
5976}
5977
5978
5979/**
5980 * Checks guest-state as part of VM-entry.
5981 *
5982 * @returns VBox status code.
5983 * @param pVCpu The cross context virtual CPU structure.
5984 * @param pszInstr The VMX instruction name (for logging purposes).
5985 */
5986IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPU pVCpu, const char *pszInstr)
5987{
5988 int rc = iemVmxVmentryCheckGuestControlRegsMsrs(pVCpu, pszInstr);
5989 if (RT_SUCCESS(rc))
5990 {
5991 rc = iemVmxVmentryCheckGuestSegRegs(pVCpu, pszInstr);
5992 if (RT_SUCCESS(rc))
5993 {
5994 rc = iemVmxVmentryCheckGuestGdtrIdtr(pVCpu, pszInstr);
5995 if (RT_SUCCESS(rc))
5996 {
5997 rc = iemVmxVmentryCheckGuestRipRFlags(pVCpu, pszInstr);
5998 if (RT_SUCCESS(rc))
5999 {
6000 rc = iemVmxVmentryCheckGuestNonRegState(pVCpu, pszInstr);
6001 if (RT_SUCCESS(rc))
6002 return iemVmxVmentryCheckGuestPdptes(pVCpu, pszInstr);
6003 }
6004 }
6005 }
6006 }
6007 return rc;
6008}
6009
6010
6011/**
6012 * Checks host-state as part of VM-entry.
6013 *
6014 * @returns VBox status code.
6015 * @param pVCpu The cross context virtual CPU structure.
6016 * @param pszInstr The VMX instruction name (for logging purposes).
6017 */
6018IEM_STATIC int iemVmxVmentryCheckHostState(PVMCPU pVCpu, const char *pszInstr)
6019{
6020 /*
6021 * Host Control Registers and MSRs.
6022 * See Intel spec. 26.2.2 "Checks on Host Control Registers and MSRs".
6023 */
6024 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6025 const char * const pszFailure = "VMFail";
6026
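    /*
     * The fixed CR0/CR4 VMX capability MSRs describe the forced bits: a bit that reads as 1 in the
     * FIXED0 MSR must be 1 in the corresponding register, and a bit that reads as 0 in the FIXED1
     * MSR must be 0. The checks below verify the host CR0/CR4 values against these constraints.
     */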
6027 /* CR0 reserved bits. */
6028 {
6029 /* CR0 MB1 bits. */
6030 uint64_t const u64Cr0Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
6031 if ((pVmcs->u64HostCr0.u & u64Cr0Fixed0) != u64Cr0Fixed0)
6032 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed0);
6033
6034 /* CR0 MBZ bits. */
6035 uint64_t const u64Cr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
6036 if (pVmcs->u64HostCr0.u & ~u64Cr0Fixed1)
6037 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed1);
6038 }
6039
6040 /* CR4 reserved bits. */
6041 {
6042 /* CR4 MB1 bits. */
6043 uint64_t const u64Cr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
6044 if ((pVmcs->u64HostCr4.u & u64Cr4Fixed0) != u64Cr4Fixed0)
6045 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed0);
6046
6047 /* CR4 MBZ bits. */
6048 uint64_t const u64Cr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
6049 if (pVmcs->u64HostCr4.u & ~u64Cr4Fixed1)
6050 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed1);
6051 }
6052
6053 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
6054 {
6055 /* CR3 reserved bits. */
6056 if (!(pVmcs->u64HostCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
6057 { /* likely */ }
6058 else
6059 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr3);
6060
6061 /* SYSENTER ESP and SYSENTER EIP. */
6062 if ( X86_IS_CANONICAL(pVmcs->u64HostSysenterEsp.u)
6063 && X86_IS_CANONICAL(pVmcs->u64HostSysenterEip.u))
6064 { /* likely */ }
6065 else
6066 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSysenterEspEip);
6067 }
6068
6069 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
6070 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PERF_MSR));
6071
6072 /* PAT MSR. */
6073 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
6074 || CPUMIsPatMsrValid(pVmcs->u64HostPatMsr.u))
6075 { /* likely */ }
6076 else
6077 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostPatMsr);
6078
6079 /* EFER MSR. */
6080 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
6081 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
6082 || !(pVmcs->u64HostEferMsr.u & ~uValidEferMask))
6083 { /* likely */ }
6084 else
6085 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsrRsvd);
6086
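    /* The "host address-space size" VM-exit control must agree with both the LMA and LME bits of
       the host EFER value that is to be loaded on VM-exit. See Intel spec. 26.2.2. */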
6087 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
6088 bool const fHostLma = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_LMA);
6089 bool const fHostLme = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_LME);
6090 if ( fHostInLongMode == fHostLma
6091 && fHostInLongMode == fHostLme)
6092 { /* likely */ }
6093 else
6094 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsr);
6095
6096 /*
6097 * Host Segment and Descriptor-Table Registers.
6098 * See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
6099 */
6100 /* Selector RPL and TI. */
6101 if ( !(pVmcs->HostCs & (X86_SEL_RPL | X86_SEL_LDT))
6102 && !(pVmcs->HostSs & (X86_SEL_RPL | X86_SEL_LDT))
6103 && !(pVmcs->HostDs & (X86_SEL_RPL | X86_SEL_LDT))
6104 && !(pVmcs->HostEs & (X86_SEL_RPL | X86_SEL_LDT))
6105 && !(pVmcs->HostFs & (X86_SEL_RPL | X86_SEL_LDT))
6106 && !(pVmcs->HostGs & (X86_SEL_RPL | X86_SEL_LDT))
6107 && !(pVmcs->HostTr & (X86_SEL_RPL | X86_SEL_LDT)))
6108 { /* likely */ }
6109 else
6110 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSel);
6111
6112 /* CS and TR selectors cannot be 0. */
6113 if ( pVmcs->HostCs
6114 && pVmcs->HostTr)
6115 { /* likely */ }
6116 else
6117 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCsTr);
6118
6119 /* SS cannot be 0 if 32-bit host. */
6120 if ( fHostInLongMode
6121 || pVmcs->HostSs)
6122 { /* likely */ }
6123 else
6124 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSs);
6125
6126 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
6127 {
6128 /* FS, GS, GDTR, IDTR, TR base address. */
6129 if ( X86_IS_CANONICAL(pVmcs->u64HostFsBase.u)
6130 && X86_IS_CANONICAL(pVmcs->u64HostGsBase.u)
6131 && X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u)
6132 && X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u)
6133 && X86_IS_CANONICAL(pVmcs->u64HostTrBase.u))
6134 { /* likely */ }
6135 else
6136 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSegBase);
6137 }
6138
6139 /*
6140 * Host address-space size for 64-bit CPUs.
6141 * See Intel spec. 26.2.4 "Checks Related to Address-Space Size".
6142 */
6143 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
6144 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
6145 {
6146 bool const fCpuInLongMode = CPUMIsGuestInLongMode(pVCpu);
6147
6148 /* Logical processor in IA-32e mode. */
6149 if (fCpuInLongMode)
6150 {
6151 if (fHostInLongMode)
6152 {
6153 /* PAE must be set. */
6154 if (pVmcs->u64HostCr4.u & X86_CR4_PAE)
6155 { /* likely */ }
6156 else
6157 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pae);
6158
6159 /* RIP must be canonical. */
6160 if (X86_IS_CANONICAL(pVmcs->u64HostRip.u))
6161 { /* likely */ }
6162 else
6163 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRip);
6164 }
6165 else
6166 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostLongMode);
6167 }
6168 else
6169 {
6170 /* Logical processor is outside IA-32e mode. */
6171 if ( !fGstInLongMode
6172 && !fHostInLongMode)
6173 {
6174 /* PCIDE should not be set. */
6175 if (!(pVmcs->u64HostCr4.u & X86_CR4_PCIDE))
6176 { /* likely */ }
6177 else
6178 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pcide);
6179
6180 /* The high 32-bits of RIP MBZ. */
6181 if (!pVmcs->u64HostRip.s.Hi)
6182 { /* likely */ }
6183 else
6184 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRipRsvd);
6185 }
6186 else
6187 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongMode);
6188 }
6189 }
6190 else
6191 {
6192 /* Host address-space size for 32-bit CPUs. */
6193 if ( !fGstInLongMode
6194 && !fHostInLongMode)
6195 { /* likely */ }
6196 else
6197 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongModeNoCpu);
6198 }
6199
6200 NOREF(pszInstr);
6201 NOREF(pszFailure);
6202 return VINF_SUCCESS;
6203}
6204
6205
6206/**
6207 * Checks VM-entry controls fields as part of VM-entry.
6208 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
6209 *
6210 * @returns VBox status code.
6211 * @param pVCpu The cross context virtual CPU structure.
6212 * @param pszInstr The VMX instruction name (for logging purposes).
6213 */
6214IEM_STATIC int iemVmxVmentryCheckEntryCtls(PVMCPU pVCpu, const char *pszInstr)
6215{
6216 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6217 const char * const pszFailure = "VMFail";
6218
6219 /* VM-entry controls. */
6220 VMXCTLSMSR const EntryCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.EntryCtls;
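    /* In the VMX capability MSRs, a bit set in the allowed-0 settings means the control must be 1,
       and a bit clear in the allowed-1 settings means the control must be 0. The same pattern is
       used for the other control-field checks. */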
6221 if (~pVmcs->u32EntryCtls & EntryCtls.n.allowed0)
6222 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsDisallowed0);
6223
6224 if (pVmcs->u32EntryCtls & ~EntryCtls.n.allowed1)
6225 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsAllowed1);
6226
6227 /* Event injection. */
6228 uint32_t const uIntInfo = pVmcs->u32EntryIntInfo;
6229 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VALID))
6230 {
6231 /* Type and vector. */
6232 uint8_t const uType = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_TYPE);
6233 uint8_t const uVector = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VECTOR);
6234 uint8_t const uRsvd = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_RSVD_12_30);
6235 if ( !uRsvd
6236 && HMVmxIsEntryIntInfoTypeValid(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxMonitorTrapFlag, uType)
6237 && HMVmxIsEntryIntInfoVectorValid(uVector, uType))
6238 { /* likely */ }
6239 else
6240 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoTypeVecRsvd);
6241
6242 /* Exception error code. */
6243 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID))
6244 {
6245 /* Delivery possible only in Unrestricted-guest mode when CR0.PE is set. */
6246 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
6247 || (pVmcs->u64GuestCr0.s.Lo & X86_CR0_PE))
6248 { /* likely */ }
6249 else
6250 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodePe);
6251
6252 /* Exceptions that provide an error code. */
6253 if ( uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
6254 && ( uVector == X86_XCPT_DF
6255 || uVector == X86_XCPT_TS
6256 || uVector == X86_XCPT_NP
6257 || uVector == X86_XCPT_SS
6258 || uVector == X86_XCPT_GP
6259 || uVector == X86_XCPT_PF
6260 || uVector == X86_XCPT_AC))
6261 { /* likely */ }
6262 else
6263 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodeVec);
6264
6265 /* Exception error-code reserved bits. */
6266 if (!(pVmcs->u32EntryXcptErrCode & ~VMX_ENTRY_INT_XCPT_ERR_CODE_VALID_MASK))
6267 { /* likely */ }
6268 else
6269 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryXcptErrCodeRsvd);
6270
6271 /* Injecting a software interrupt, software exception or privileged software exception. */
6272 if ( uType == VMX_ENTRY_INT_INFO_TYPE_SW_INT
6273 || uType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT
6274 || uType == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT)
6275 {
6276 /* Instruction length must be in the range 0-15. */
6277 if (pVmcs->u32EntryInstrLen <= VMX_ENTRY_INSTR_LEN_MAX)
6278 { /* likely */ }
6279 else
6280 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLen);
6281
6282 /* Instruction length of 0 is allowed only when its CPU feature is present. */
6283 if ( pVmcs->u32EntryInstrLen == 0
6284 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxEntryInjectSoftInt)
6285 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLenZero);
6286 }
6287 }
6288 }
6289
6290 /* VM-entry MSR-load count and VM-entry MSR-load area address. */
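    /* The auto-load/store MSR area addresses must be 16-byte aligned and must not exceed the CPU's
       physical-address width; we additionally require them to be backed by regular RAM since we
       read the areas directly. */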
6291 if (pVmcs->u32EntryMsrLoadCount)
6292 {
6293 if ( (pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
6294 || (pVmcs->u64AddrEntryMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6295 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u))
6296 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrEntryMsrLoad);
6297 }
6298
6299 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)); /* We don't support SMM yet. */
6300 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON)); /* We don't support dual-monitor treatment yet. */
6301
6302 NOREF(pszInstr);
6303 NOREF(pszFailure);
6304 return VINF_SUCCESS;
6305}
6306
6307
6308/**
6309 * Checks VM-exit controls fields as part of VM-entry.
6310 * See Intel spec. 26.2.1.2 "VM-Exit Control Fields".
6311 *
6312 * @returns VBox status code.
6313 * @param pVCpu The cross context virtual CPU structure.
6314 * @param pszInstr The VMX instruction name (for logging purposes).
6315 */
6316IEM_STATIC int iemVmxVmentryCheckExitCtls(PVMCPU pVCpu, const char *pszInstr)
6317{
6318 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6319 const char * const pszFailure = "VMFail";
6320
6321 /* VM-exit controls. */
6322 VMXCTLSMSR const ExitCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ExitCtls;
6323 if (~pVmcs->u32ExitCtls & ExitCtls.n.allowed0)
6324 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsDisallowed0);
6325
6326 if (pVmcs->u32ExitCtls & ~ExitCtls.n.allowed1)
6327 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsAllowed1);
6328
6329 /* Save preemption timer without activating it. */
6330 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
6331 && (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
6332 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_SavePreemptTimer);
6333
6334 /* VM-exit MSR-store count and VM-exit MSR-store area address. */
6335 if (pVmcs->u32ExitMsrStoreCount)
6336 {
6337 if ( (pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK)
6338 || (pVmcs->u64AddrExitMsrStore.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6339 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u))
6340 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrStore);
6341 }
6342
6343 /* VM-exit MSR-load count and VM-exit MSR-load area address. */
6344 if (pVmcs->u32ExitMsrLoadCount)
6345 {
6346 if ( (pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
6347 || (pVmcs->u64AddrExitMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6348 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u))
6349 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrLoad);
6350 }
6351
6352 NOREF(pszInstr);
6353 NOREF(pszFailure);
6354 return VINF_SUCCESS;
6355}
6356
6357
6358/**
6359 * Checks VM-execution controls fields as part of VM-entry.
6360 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
6361 *
6362 * @returns VBox status code.
6363 * @param pVCpu The cross context virtual CPU structure.
6364 * @param pszInstr The VMX instruction name (for logging purposes).
6365 *
6366 * @remarks This may update secondary-processor based VM-execution control fields
6367 * in the current VMCS if necessary.
6368 */
6369IEM_STATIC int iemVmxVmentryCheckExecCtls(PVMCPU pVCpu, const char *pszInstr)
6370{
6371 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6372 const char * const pszFailure = "VMFail";
6373
6374 /* Pin-based VM-execution controls. */
6375 {
6376 VMXCTLSMSR const PinCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.PinCtls;
6377 if (~pVmcs->u32PinCtls & PinCtls.n.allowed0)
6378 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsDisallowed0);
6379
6380 if (pVmcs->u32PinCtls & ~PinCtls.n.allowed1)
6381 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsAllowed1);
6382 }
6383
6384 /* Processor-based VM-execution controls. */
6385 {
6386 VMXCTLSMSR const ProcCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ProcCtls;
6387 if (~pVmcs->u32ProcCtls & ProcCtls.n.allowed0)
6388 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsDisallowed0);
6389
6390 if (pVmcs->u32ProcCtls & ~ProcCtls.n.allowed1)
6391 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsAllowed1);
6392 }
6393
6394 /* Secondary processor-based VM-execution controls. */
6395 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
6396 {
6397 VMXCTLSMSR const ProcCtls2 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ProcCtls2;
6398 if (~pVmcs->u32ProcCtls2 & ProcCtls2.n.allowed0)
6399 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Disallowed0);
6400
6401 if (pVmcs->u32ProcCtls2 & ~ProcCtls2.n.allowed1)
6402 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Allowed1);
6403 }
6404 else
6405 Assert(!pVmcs->u32ProcCtls2);
6406
6407 /* CR3-target count. */
6408 if (pVmcs->u32Cr3TargetCount <= VMX_V_CR3_TARGET_COUNT)
6409 { /* likely */ }
6410 else
6411 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Cr3TargetCount);
6412
6413 /* I/O bitmaps physical addresses. */
6414 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
6415 {
6416 if ( (pVmcs->u64AddrIoBitmapA.u & X86_PAGE_4K_OFFSET_MASK)
6417 || (pVmcs->u64AddrIoBitmapA.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6418 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapA.u))
6419 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapA);
6420
6421 if ( (pVmcs->u64AddrIoBitmapB.u & X86_PAGE_4K_OFFSET_MASK)
6422 || (pVmcs->u64AddrIoBitmapB.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6423 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapB.u))
6424 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapB);
6425 }
6426
6427 /* MSR bitmap physical address. */
6428 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
6429 {
6430 RTGCPHYS const GCPhysMsrBitmap = pVmcs->u64AddrMsrBitmap.u;
6431 if ( (GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
6432 || (GCPhysMsrBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6433 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysMsrBitmap))
6434 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrMsrBitmap);
6435
6436 /* Read the MSR bitmap. */
6437 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap));
6438 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap),
6439 GCPhysMsrBitmap, VMX_V_MSR_BITMAP_SIZE);
6440 if (RT_FAILURE(rc))
6441 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrBitmapPtrReadPhys);
6442 }
6443
6444 /* TPR shadow related controls. */
6445 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
6446 {
6447 /* Virtual-APIC page physical address. */
6448 RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
6449 if ( (GCPhysVirtApic & X86_PAGE_4K_OFFSET_MASK)
6450 || (GCPhysVirtApic >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6451 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic))
6452 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVirtApicPage);
6453
6454 /* Read the Virtual-APIC page. */
6455 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
6456 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage),
6457 GCPhysVirtApic, VMX_V_VIRT_APIC_SIZE);
6458 if (RT_FAILURE(rc))
6459 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys);
6460
6461 /* TPR threshold without virtual-interrupt delivery. */
6462 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
6463 && (pVmcs->u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK))
6464 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdRsvd);
6465
6466 /* TPR threshold and VTPR. */
6467 uint8_t const *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
6468 uint8_t const u8VTpr = *(pbVirtApic + XAPIC_OFF_TPR);
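        /* With the TPR shadow in use but without APIC-access virtualization and virtual-interrupt
           delivery, bits 3:0 of the TPR threshold must not exceed bits 7:4 of the VTPR byte at
           offset 080H of the virtual-APIC page. */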
6469 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
6470 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
6471 && RT_BF_GET(pVmcs->u32TprThreshold, VMX_BF_TPR_THRESHOLD_TPR) > ((u8VTpr >> 4) & UINT32_C(0xf)) /* Bits 4:7 */)
6472 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdVTpr);
6473 }
6474 else
6475 {
6476 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
6477 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
6478 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
6479 { /* likely */ }
6480 else
6481 {
6482 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
6483 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicTprShadow);
6484 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
6485 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ApicRegVirt);
6486 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
6487 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtIntDelivery);
6488 }
6489 }
6490
6491 /* NMI exiting and virtual-NMIs. */
6492 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT)
6493 && (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
6494 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtNmi);
6495
6496 /* Virtual-NMIs and NMI-window exiting. */
6497 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6498 && (pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
6499 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_NmiWindowExit);
6500
6501 /* Virtualize APIC accesses. */
6502 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
6503 {
6504 /* APIC-access physical address. */
6505 RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
6506 if ( (GCPhysApicAccess & X86_PAGE_4K_OFFSET_MASK)
6507 || (GCPhysApicAccess >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6508 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
6509 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccess);
6510
6511 /*
6512 * Disallow APIC-access page and virtual-APIC page from being the same address.
6513 * Note! This is not an Intel requirement, but one imposed by our implementation.
6514 */
6515 /** @todo r=ramshankar: This is done primarily to simplify recursion scenarios while
6516 * redirecting accesses between the APIC-access page and the virtual-APIC
6517 * page. If any nested hypervisor requires this, we can implement it later. */
6518 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
6519 {
6520 RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
6521 if (GCPhysVirtApic == GCPhysApicAccess)
6522 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessEqVirtApic);
6523 }
6524
6525 /*
6526 * Register the handler for the APIC-access page.
6527 *
6528 * We don't deregister the APIC-access page handler during the VM-exit as a different
6529 * nested-VCPU might be using the same guest-physical address for its APIC-access page.
6530 *
6531 * We leave the page registered until the first access that happens outside VMX non-root
6532 * mode. Guest software is allowed to access structures such as the APIC-access page
6533 * only when no logical processor with a current VMCS references it in VMX non-root mode,
6534 * otherwise it can lead to unpredictable behavior including guest triple-faults.
6535 *
6536 * See Intel spec. 24.11.4 "Software Access to Related Structures".
6537 */
6538 int rc = PGMHandlerPhysicalRegister(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess, GCPhysApicAccess,
6539 pVCpu->iem.s.hVmxApicAccessPage, NIL_RTR3PTR /* pvUserR3 */,
6540 NIL_RTR0PTR /* pvUserR0 */, NIL_RTRCPTR /* pvUserRC */, NULL /* pszDesc */);
6541 if (RT_FAILURE(rc))
6542 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessHandlerReg);
6543 }
6544
6545 /* Virtualize-x2APIC mode is mutually exclusive with virtualize-APIC accesses. */
6546 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
6547 && (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
6548 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
6549
6550 /* Virtual-interrupt delivery requires external interrupt exiting. */
6551 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
6552 && !(pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT))
6553 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
6554
6555 /* VPID. */
6556 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VPID)
6557 || pVmcs->u16Vpid != 0)
6558 { /* likely */ }
6559 else
6560 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Vpid);
6561
6562 Assert(!(pVmcs->u32PinCtls & VMX_PIN_CTLS_POSTED_INT)); /* We don't support posted interrupts yet. */
6563 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)); /* We don't support EPT yet. */
6564 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PML)); /* We don't support PML yet. */
6565 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)); /* We don't support Unrestricted-guests yet. */
6566 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMFUNC)); /* We don't support VM functions yet. */
6567 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT_VE)); /* We don't support EPT-violation #VE yet. */
6568 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)); /* We don't support Pause-loop exiting yet. */
6569
6570 /* VMCS shadowing. */
6571 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
6572 {
6573 /* VMREAD-bitmap physical address. */
6574 RTGCPHYS const GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
6575 if ( ( GCPhysVmreadBitmap & X86_PAGE_4K_OFFSET_MASK)
6576 || ( GCPhysVmreadBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6577 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmreadBitmap))
6578 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmreadBitmap);
6579
6580 /* VMWRITE-bitmap physical address. */
6581 RTGCPHYS const GCPhysVmwriteBitmap = pVmcs->u64AddrVmwriteBitmap.u;
6582 if ( ( GCPhysVmwriteBitmap & X86_PAGE_4K_OFFSET_MASK)
6583 || ( GCPhysVmwriteBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6584 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmwriteBitmap))
6585 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmwriteBitmap);
6586
6587 /* Read the VMREAD-bitmap. */
6588 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap));
6589 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap),
6590 GCPhysVmreadBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
6591 if (RT_FAILURE(rc))
6592 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmreadBitmapPtrReadPhys);
6593
6594 /* Read the VMWRITE-bitmap. */
6595 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap));
6596 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap),
6597 GCPhysVmwriteBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
6598 if (RT_FAILURE(rc))
6599 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmwriteBitmapPtrReadPhys);
6600 }
6601
6602 NOREF(pszInstr);
6603 NOREF(pszFailure);
6604 return VINF_SUCCESS;
6605}
6606
6607
6608/**
6609 * Loads the guest control registers, debug register and some MSRs as part of
6610 * VM-entry.
6611 *
6612 * @param pVCpu The cross context virtual CPU structure.
6613 */
6614IEM_STATIC void iemVmxVmentryLoadGuestControlRegsMsrs(PVMCPU pVCpu)
6615{
6616 /*
6617 * Load guest control registers, debug registers and MSRs.
6618 * See Intel spec. 26.3.2.1 "Loading Guest Control Registers, Debug Registers and MSRs".
6619 */
6620 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6621 uint64_t const uGstCr0 = (pVmcs->u64GuestCr0.u & ~VMX_ENTRY_CR0_IGNORE_MASK)
6622 | (pVCpu->cpum.GstCtx.cr0 & VMX_ENTRY_CR0_IGNORE_MASK);
6623 CPUMSetGuestCR0(pVCpu, uGstCr0);
6624 CPUMSetGuestCR4(pVCpu, pVmcs->u64GuestCr4.u);
6625 pVCpu->cpum.GstCtx.cr3 = pVmcs->u64GuestCr3.u;
6626
6627 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
6628 pVCpu->cpum.GstCtx.dr[7] = (pVmcs->u64GuestDr7.u & ~VMX_ENTRY_DR7_MBZ_MASK) | VMX_ENTRY_DR7_MB1_MASK;
6629
6630 pVCpu->cpum.GstCtx.SysEnter.eip = pVmcs->u64GuestSysenterEip.s.Lo;
6631 pVCpu->cpum.GstCtx.SysEnter.esp = pVmcs->u64GuestSysenterEsp.s.Lo;
6632 pVCpu->cpum.GstCtx.SysEnter.cs = pVmcs->u32GuestSysenterCS;
6633
6634 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
6635 {
6636 /* FS base and GS base are loaded while loading the rest of the guest segment registers. */
6637
6638 /* EFER MSR. */
6639 if (!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR))
6640 {
6641 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
6642 bool const fGstPaging = RT_BOOL(uGstCr0 & X86_CR0_PG);
6643 uint64_t const uHostEfer = pVCpu->cpum.GstCtx.msrEFER;
6644 if (fGstInLongMode)
6645 {
6646 /* If the nested-guest is in long mode, LMA and LME are both set. */
6647 Assert(fGstPaging);
6648 pVCpu->cpum.GstCtx.msrEFER = uHostEfer | (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
6649 }
6650 else
6651 {
6652 /*
6653 * If the nested-guest is outside long mode:
6654 * - With paging: LMA is cleared, LME is cleared.
6655 * - Without paging: LMA is cleared, LME is left unmodified.
6656 */
6657 uint64_t const fLmaLmeMask = MSR_K6_EFER_LMA | (fGstPaging ? MSR_K6_EFER_LME : 0);
6658 pVCpu->cpum.GstCtx.msrEFER = uHostEfer & ~fLmaLmeMask;
6659 }
6660 }
6661 /* else: see below. */
6662 }
6663
6664 /* PAT MSR. */
6665 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
6666 pVCpu->cpum.GstCtx.msrPAT = pVmcs->u64GuestPatMsr.u;
6667
6668 /* EFER MSR. */
6669 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
6670 pVCpu->cpum.GstCtx.msrEFER = pVmcs->u64GuestEferMsr.u;
6671
6672 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
6673 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR));
6674
6675 /* We don't support IA32_BNDCFGS MSR yet. */
6676 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR));
6677
6678 /* Nothing to do for SMBASE register - We don't support SMM yet. */
6679}
6680
6681
6682/**
6683 * Loads the guest segment registers, GDTR, IDTR, LDTR and TR as part of VM-entry.
6684 *
6685 * @param pVCpu The cross context virtual CPU structure.
6686 */
6687IEM_STATIC void iemVmxVmentryLoadGuestSegRegs(PVMCPU pVCpu)
6688{
6689 /*
6690 * Load guest segment registers, GDTR, IDTR, LDTR and TR.
6691 * See Intel spec. 26.3.2.2 "Loading Guest Segment Registers and Descriptor-Table Registers".
6692 */
6693 /* CS, SS, ES, DS, FS, GS. */
6694 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6695 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
6696 {
6697 PCPUMSELREG pGstSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6698 CPUMSELREG VmcsSelReg;
6699 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &VmcsSelReg);
6700 AssertRC(rc); NOREF(rc);
6701 if (!(VmcsSelReg.Attr.u & X86DESCATTR_UNUSABLE))
6702 {
6703 pGstSelReg->Sel = VmcsSelReg.Sel;
6704 pGstSelReg->ValidSel = VmcsSelReg.Sel;
6705 pGstSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6706 pGstSelReg->u64Base = VmcsSelReg.u64Base;
6707 pGstSelReg->u32Limit = VmcsSelReg.u32Limit;
6708 pGstSelReg->Attr.u = VmcsSelReg.Attr.u;
6709 }
6710 else
6711 {
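            /* Unusable segments still get their selector loaded; base, limit and attributes are
               either taken from the VMCS or forced to fixed values depending on the register.
               See Intel spec. 26.3.2.2. */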
6712 pGstSelReg->Sel = VmcsSelReg.Sel;
6713 pGstSelReg->ValidSel = VmcsSelReg.Sel;
6714 pGstSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6715 switch (iSegReg)
6716 {
6717 case X86_SREG_CS:
6718 pGstSelReg->u64Base = VmcsSelReg.u64Base;
6719 pGstSelReg->u32Limit = VmcsSelReg.u32Limit;
6720 pGstSelReg->Attr.u = VmcsSelReg.Attr.u;
6721 break;
6722
6723 case X86_SREG_SS:
6724 pGstSelReg->u64Base = VmcsSelReg.u64Base & UINT32_C(0xfffffff0);
6725 pGstSelReg->u32Limit = 0;
6726 pGstSelReg->Attr.u = (VmcsSelReg.Attr.u & X86DESCATTR_DPL) | X86DESCATTR_D | X86DESCATTR_UNUSABLE;
6727 break;
6728
6729 case X86_SREG_ES:
6730 case X86_SREG_DS:
6731 pGstSelReg->u64Base = 0;
6732 pGstSelReg->u32Limit = 0;
6733 pGstSelReg->Attr.u = X86DESCATTR_UNUSABLE;
6734 break;
6735
6736 case X86_SREG_FS:
6737 case X86_SREG_GS:
6738 pGstSelReg->u64Base = VmcsSelReg.u64Base;
6739 pGstSelReg->u32Limit = 0;
6740 pGstSelReg->Attr.u = X86DESCATTR_UNUSABLE;
6741 break;
6742 }
6743 Assert(pGstSelReg->Attr.n.u1Unusable);
6744 }
6745 }
6746
6747 /* LDTR. */
6748 pVCpu->cpum.GstCtx.ldtr.Sel = pVmcs->GuestLdtr;
6749 pVCpu->cpum.GstCtx.ldtr.ValidSel = pVmcs->GuestLdtr;
6750 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
6751 if (!(pVmcs->u32GuestLdtrAttr & X86DESCATTR_UNUSABLE))
6752 {
6753 pVCpu->cpum.GstCtx.ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
6754 pVCpu->cpum.GstCtx.ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
6755 pVCpu->cpum.GstCtx.ldtr.Attr.u = pVmcs->u32GuestLdtrAttr;
6756 }
6757 else
6758 {
6759 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
6760 pVCpu->cpum.GstCtx.ldtr.u32Limit = 0;
6761 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE;
6762 }
6763
6764 /* TR. */
6765 Assert(!(pVmcs->u32GuestTrAttr & X86DESCATTR_UNUSABLE));
6766 pVCpu->cpum.GstCtx.tr.Sel = pVmcs->GuestTr;
6767 pVCpu->cpum.GstCtx.tr.ValidSel = pVmcs->GuestTr;
6768 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
6769 pVCpu->cpum.GstCtx.tr.u64Base = pVmcs->u64GuestTrBase.u;
6770 pVCpu->cpum.GstCtx.tr.u32Limit = pVmcs->u32GuestTrLimit;
6771 pVCpu->cpum.GstCtx.tr.Attr.u = pVmcs->u32GuestTrAttr;
6772
6773 /* GDTR. */
6774 pVCpu->cpum.GstCtx.gdtr.cbGdt = pVmcs->u32GuestGdtrLimit;
6775 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcs->u64GuestGdtrBase.u;
6776
6777 /* IDTR. */
6778 pVCpu->cpum.GstCtx.idtr.cbIdt = pVmcs->u32GuestIdtrLimit;
6779 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcs->u64GuestIdtrBase.u;
6780}
6781
6782
6783/**
6784 * Loads the guest MSRs from the VM-entry auto-load MSRs as part of VM-entry.
6785 *
6786 * @returns VBox status code.
6787 * @param pVCpu The cross context virtual CPU structure.
6788 * @param pszInstr The VMX instruction name (for logging purposes).
6789 */
6790IEM_STATIC int iemVmxVmentryLoadGuestAutoMsrs(PVMCPU pVCpu, const char *pszInstr)
6791{
6792 /*
6793 * Load guest MSRs.
6794 * See Intel spec. 26.4 "Loading MSRs".
6795 */
6796 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6797 const char *const pszFailure = "VM-exit";
6798
6799 /*
6800 * The VM-entry MSR-load area address need not be a valid guest-physical address if the
6801 * VM-entry MSR load count is 0. If this is the case, bail early without reading it.
6802 * See Intel spec. 24.8.2 "VM-Entry Controls for MSRs".
6803 */
6804 uint32_t const cMsrs = pVmcs->u32EntryMsrLoadCount;
6805 if (!cMsrs)
6806 return VINF_SUCCESS;
6807
6808 /*
6809 * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count is
6810 * exceeded including possibly raising #MC exceptions during VMX transition. Our
6811 * implementation shall fail VM-entry with an VMX_EXIT_ERR_MSR_LOAD VM-exit.
6812 */
6813 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
6814 if (fIsMsrCountValid)
6815 { /* likely */ }
6816 else
6817 {
6818 iemVmxVmcsSetExitQual(pVCpu, VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
6819 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadCount);
6820 }
6821
6822 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrEntryMsrLoad.u;
6823 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
6824 GCPhysAutoMsrArea, VMX_V_AUTOMSR_AREA_SIZE);
6825 if (RT_SUCCESS(rc))
6826 {
6827 PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
6828 Assert(pMsr);
6829 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
6830 {
6831 if ( !pMsr->u32Reserved
6832 && pMsr->u32Msr != MSR_K8_FS_BASE
6833 && pMsr->u32Msr != MSR_K8_GS_BASE
6834 && pMsr->u32Msr != MSR_K6_EFER
6835 && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
6836 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
6837 {
6838 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
6839 if (rcStrict == VINF_SUCCESS)
6840 continue;
6841
6842 /*
6843 * If we're in ring-0, we cannot handle returns to ring-3 at this point and continue VM-entry.
6844 * If any guest hypervisor loads MSRs that require ring-3 handling, we cause a VM-entry failure
6845 * recording the MSR index in the VM-exit qualification (as per the Intel spec.), indicated
6846 * further by our own, specific diagnostic code. Later, we can try to implement handling of the
6847 * MSR in ring-0 if possible, or come up with a better, generic solution.
6848 */
6849 iemVmxVmcsSetExitQual(pVCpu, idxMsr);
6850 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
6851 ? kVmxVDiag_Vmentry_MsrLoadRing3
6852 : kVmxVDiag_Vmentry_MsrLoad;
6853 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
6854 }
6855 else
6856 {
6857 iemVmxVmcsSetExitQual(pVCpu, idxMsr);
6858 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadRsvd);
6859 }
6860 }
6861 }
6862 else
6863 {
6864 AssertMsgFailed(("%s: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", pszInstr, GCPhysAutoMsrArea, rc));
6865 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadPtrReadPhys);
6866 }
6867
6868 NOREF(pszInstr);
6869 NOREF(pszFailure);
6870 return VINF_SUCCESS;
6871}
6872
6873
6874/**
6875 * Loads the guest-state non-register state as part of VM-entry.
6876 *
6878 * @param pVCpu The cross context virtual CPU structure.
6879 *
6880 * @remarks This must be called only after loading the nested-guest register state
6881 * (especially nested-guest RIP).
6882 */
6883IEM_STATIC void iemVmxVmentryLoadGuestNonRegState(PVMCPU pVCpu)
6884{
6885 /*
6886 * Load guest non-register state.
6887 * See Intel spec. 26.6 "Special Features of VM Entry"
6888 */
6889 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6890 bool const fEntryVectoring = HMVmxIsVmentryVectoring(pVmcs->u32EntryIntInfo, NULL /* puEntryIntInfoType */);
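    /* A VM-entry is "vectoring" when the valid bit of the VM-entry interruption-information field
       is set, i.e. an event is being injected as part of this VM-entry. */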
6891 if (!fEntryVectoring)
6892 {
6893 if (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
6894 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
6895 else
6896 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
6897
6898 /* SMI blocking is irrelevant. We don't support SMIs yet. */
6899 }
6900 else
6901 {
6902 /* When the VM-entry is vectoring, there is no blocking by STI or Mov-SS. */
6903 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
6904 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6905 }
6906
6907 /* NMI blocking. */
6908 if ( (pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
6909 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
6910 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
6911
6912 /* Loading PDPTEs will be taken care when we switch modes. We don't support EPT yet. */
6913 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT));
6914
6915 /* VPID is irrelevant. We don't support VPID yet. */
6916
6917 /* Clear address-range monitoring. */
6918 EMMonitorWaitClear(pVCpu);
6919}
6920
6921
6922/**
6923 * Loads the guest-state as part of VM-entry.
6924 *
6925 * @returns VBox status code.
6926 * @param pVCpu The cross context virtual CPU structure.
6927 * @param pszInstr The VMX instruction name (for logging purposes).
6928 *
6929 * @remarks This must be done after all the necessary steps prior to loading of
6930 * guest-state (e.g. checking various VMCS state).
6931 */
6932IEM_STATIC int iemVmxVmentryLoadGuestState(PVMCPU pVCpu, const char *pszInstr)
6933{
6934 iemVmxVmentryLoadGuestControlRegsMsrs(pVCpu);
6935 iemVmxVmentryLoadGuestSegRegs(pVCpu);
6936
6937 /*
6938 * Load guest RIP, RSP and RFLAGS.
6939 * See Intel spec. 26.3.2.3 "Loading Guest RIP, RSP and RFLAGS".
6940 */
6941 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6942 pVCpu->cpum.GstCtx.rsp = pVmcs->u64GuestRsp.u;
6943 pVCpu->cpum.GstCtx.rip = pVmcs->u64GuestRip.u;
6944 pVCpu->cpum.GstCtx.rflags.u = pVmcs->u64GuestRFlags.u;
6945
6946 /* Initialize the PAUSE-loop controls as part of VM-entry. */
6947 pVCpu->cpum.GstCtx.hwvirt.vmx.uFirstPauseLoopTick = 0;
6948 pVCpu->cpum.GstCtx.hwvirt.vmx.uPrevPauseTick = 0;
6949
6950 iemVmxVmentryLoadGuestNonRegState(pVCpu);
6951
6952 NOREF(pszInstr);
6953 return VINF_SUCCESS;
6954}
6955
6956
6957/**
6958 * Returns whether there is a pending debug exception on VM-entry.
6959 *
6960 * @param pVCpu The cross context virtual CPU structure.
6961 * @param pszInstr The VMX instruction name (for logging purposes).
6962 */
6963IEM_STATIC bool iemVmxVmentryIsPendingDebugXcpt(PVMCPU pVCpu, const char *pszInstr)
6964{
6965 /*
6966 * Pending debug exceptions.
6967 * See Intel spec. 26.6.3 "Delivery of Pending Debug Exceptions after VM Entry".
6968 */
6969 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6970 Assert(pVmcs);
6971
6972 bool fPendingDbgXcpt = RT_BOOL(pVmcs->u64GuestPendingDbgXcpt.u & ( VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS
6973 | VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_EN_BP));
6974 if (fPendingDbgXcpt)
6975 {
6976 uint8_t uEntryIntInfoType;
6977 bool const fEntryVectoring = HMVmxIsVmentryVectoring(pVmcs->u32EntryIntInfo, &uEntryIntInfoType);
6978 if (fEntryVectoring)
6979 {
6980 switch (uEntryIntInfoType)
6981 {
6982 case VMX_ENTRY_INT_INFO_TYPE_EXT_INT:
6983 case VMX_ENTRY_INT_INFO_TYPE_NMI:
6984 case VMX_ENTRY_INT_INFO_TYPE_HW_XCPT:
6985 case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT:
6986 fPendingDbgXcpt = false;
6987 break;
6988
6989 case VMX_ENTRY_INT_INFO_TYPE_SW_XCPT:
6990 {
6991 /*
6992 * Whether the pending debug exception for software exceptions other than
6993 * #BP and #OF is delivered after injecting the exception or is discarded
6994 * is CPU implementation-specific. We will discard them (easier).
6995 */
6996 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(pVmcs->u32EntryIntInfo);
6997 if ( uVector != X86_XCPT_BP
6998 && uVector != X86_XCPT_OF)
6999 fPendingDbgXcpt = false;
7000 RT_FALL_THRU();
7001 }
7002 case VMX_ENTRY_INT_INFO_TYPE_SW_INT:
7003 {
7004 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
7005 fPendingDbgXcpt = false;
7006 break;
7007 }
7008 }
7009 }
7010 else
7011 {
7012 /*
7013 * When the VM-entry is not vectoring but there is blocking-by-MovSS, whether the
7014 * pending debug exception is held pending or is discarded is CPU implementation
7015 * specific. We will discard it (easier).
7016 */
7017 if (pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
7018 fPendingDbgXcpt = false;
7019
7020 /* There's no pending debug exception in the shutdown or wait-for-SIPI state. */
7021 if (pVmcs->u32GuestActivityState & (VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN | VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT))
7022 fPendingDbgXcpt = false;
7023 }
7024 }
7025
7026 NOREF(pszInstr);
7027 return fPendingDbgXcpt;
7028}
7029
7030
7031/**
7032 * Set up the monitor-trap flag (MTF).
7033 *
7034 * @param pVCpu The cross context virtual CPU structure.
7035 * @param pszInstr The VMX instruction name (for logging purposes).
7036 */
7037IEM_STATIC void iemVmxVmentrySetupMtf(PVMCPU pVCpu, const char *pszInstr)
7038{
7039 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
7040 Assert(pVmcs);
7041 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG)
7042 {
7043 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_MTF);
7044 Log(("%s: Monitor-trap flag set on VM-entry\n", pszInstr));
7045 }
7046 else
7047 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
7048 NOREF(pszInstr);
7049}
7050
7051
7052/**
7053 * Set up the VMX-preemption timer.
7054 *
7055 * @param pVCpu The cross context virtual CPU structure.
7056 * @param pszInstr The VMX instruction name (for logging purposes).
7057 */
7058IEM_STATIC void iemVmxVmentrySetupPreemptTimer(PVMCPU pVCpu, const char *pszInstr)
7059{
7060 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
7061 Assert(pVmcs);
7062 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
7063 {
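        /* Record the TSC at the time of VM-entry; the remaining preemption-timer value is derived
           later relative to this tick count. */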
7064 uint64_t const uVmentryTick = TMCpuTickGetNoCheck(pVCpu);
7065 pVCpu->cpum.GstCtx.hwvirt.vmx.uVmentryTick = uVmentryTick;
7066 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER);
7067
7068 Log(("%s: VM-entry set up VMX-preemption timer at %#RX64\n", pszInstr, uVmentryTick));
7069 }
7070 else
7071 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
7072
7073 NOREF(pszInstr);
7074}
7075
7076
7077/**
7078 * Performs event injection (if any) as part of VM-entry.
7079 *
 * @returns VBox status code.
7080 * @param pVCpu The cross context virtual CPU structure.
7081 * @param pszInstr The VMX instruction name (for logging purposes).
7082 */
7083IEM_STATIC int iemVmxVmentryInjectEvent(PVMCPU pVCpu, const char *pszInstr)
7084{
7085 /*
7086 * Inject events.
7087 * See Intel spec. 26.5 "Event Injection".
7088 */
7089 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
7090 uint32_t const uEntryIntInfo = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->u32EntryIntInfo;
7091 if (VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo))
7092 {
7093 /*
7094 * The event that is going to be made pending for injection is not subject to VMX intercepts,
7095 * thus we flag ignoring of intercepts. However, recursive exceptions, if any, during delivery
7096 * of the current event -are- subject to intercepts, hence this flag will be flipped during
7097 * the actual delivery of this event.
7098 */
7099 pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents = false;
7100
7101 uint8_t const uType = VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo);
7102 if (uType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT)
7103 {
7104 Assert(VMX_ENTRY_INT_INFO_VECTOR(uEntryIntInfo) == VMX_ENTRY_INT_INFO_VECTOR_MTF);
7105 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_MTF);
7106 return VINF_SUCCESS;
7107 }
7108
7109 return HMVmxEntryIntInfoInjectTrpmEvent(pVCpu, uEntryIntInfo, pVmcs->u32EntryXcptErrCode, pVmcs->u32EntryInstrLen,
7110 pVCpu->cpum.GstCtx.cr2);
7111 }
7112
7113 /*
7114 * Inject any pending guest debug exception.
7115 * Unlike injecting events, this #DB injection on VM-entry is subject to #DB VMX intercept.
7116 * See Intel spec. 26.6.3 "Delivery of Pending Debug Exceptions after VM Entry".
7117 */
7118 bool const fPendingDbgXcpt = iemVmxVmentryIsPendingDebugXcpt(pVCpu, pszInstr);
7119 if (fPendingDbgXcpt)
7120 {
7121 pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents = true;
7122 uint32_t const uDbgXcptInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
7123 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
7124 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
7125 return HMVmxEntryIntInfoInjectTrpmEvent(pVCpu, uDbgXcptInfo, 0 /* uErrCode */, pVmcs->u32EntryInstrLen,
7126 0 /* GCPtrFaultAddress */);
7127 }
7128
7129 NOREF(pszInstr);
7130 return VINF_SUCCESS;
7131}
7132
7133
7134/**
7135 * VMLAUNCH/VMRESUME instruction execution worker.
7136 *
7137 * @returns Strict VBox status code.
7138 * @param pVCpu The cross context virtual CPU structure.
7139 * @param cbInstr The instruction length in bytes.
7140 * @param uInstrId The instruction identity (VMXINSTRID_VMLAUNCH or
7141 * VMXINSTRID_VMRESUME).
7142 *
7143 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
7144 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
7145 */
7146IEM_STATIC VBOXSTRICTRC iemVmxVmlaunchVmresume(PVMCPU pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
7147{
7148# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
7149 RT_NOREF3(pVCpu, cbInstr, uInstrId);
7150 return VINF_EM_RAW_EMULATE_INSTR;
7151# else
7152 Assert( uInstrId == VMXINSTRID_VMLAUNCH
7153 || uInstrId == VMXINSTRID_VMRESUME);
7154 const char *pszInstr = uInstrId == VMXINSTRID_VMRESUME ? "vmresume" : "vmlaunch";
7155
7156 /* Nested-guest intercept. */
7157 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7158 return iemVmxVmexitInstr(pVCpu, uInstrId == VMXINSTRID_VMRESUME ? VMX_EXIT_VMRESUME : VMX_EXIT_VMLAUNCH, cbInstr);
7159
7160 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
7161
7162 /* CPL. */
7163 if (pVCpu->iem.s.uCpl > 0)
7164 {
7165 Log(("%s: CPL %u -> #GP(0)\n", pszInstr, pVCpu->iem.s.uCpl));
7166 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_Cpl;
7167 return iemRaiseGeneralProtectionFault0(pVCpu);
7168 }
7169
7170 /* Current VMCS valid. */
7171 if (!IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
7172 {
7173 Log(("%s: VMCS pointer %#RGp invalid -> VMFailInvalid\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
7174 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrInvalid;
7175 iemVmxVmFailInvalid(pVCpu);
7176 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7177 return VINF_SUCCESS;
7178 }
7179
7180 /** @todo Distinguish block-by-MOV-SS from block-by-STI. Currently we
7181 * use block-by-STI here which is not quite correct. */
7182 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
7183 && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
7184 {
7185 Log(("%s: VM entry with events blocked by MOV SS -> VMFail\n", pszInstr));
7186 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_BlocKMovSS;
7187 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_BLOCK_MOVSS);
7188 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7189 return VINF_SUCCESS;
7190 }
7191
7192 if (uInstrId == VMXINSTRID_VMLAUNCH)
7193 {
7194 /* VMLAUNCH with non-clear VMCS. */
7195 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState == VMX_V_VMCS_STATE_CLEAR)
7196 { /* likely */ }
7197 else
7198 {
7199 Log(("vmlaunch: VMLAUNCH with non-clear VMCS -> VMFail\n"));
7200 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsClear;
7201 iemVmxVmFail(pVCpu, VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS);
7202 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7203 return VINF_SUCCESS;
7204 }
7205 }
7206 else
7207 {
7208 /* VMRESUME with non-launched VMCS. */
7209 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState == VMX_V_VMCS_STATE_LAUNCHED)
7210 { /* likely */ }
7211 else
7212 {
7213 Log(("vmresume: VMRESUME with non-launched VMCS -> VMFail\n"));
7214 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsLaunch;
7215 iemVmxVmFail(pVCpu, VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS);
7216 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7217 return VINF_SUCCESS;
7218 }
7219 }
7220
7221 /*
7222 * Load the current VMCS.
7223 */
7224 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
7225 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs),
7226 IEM_VMX_GET_CURRENT_VMCS(pVCpu), VMX_V_VMCS_SIZE);
7227 if (RT_FAILURE(rc))
7228 {
7229 Log(("%s: Failed to read VMCS at %#RGp, rc=%Rrc\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu), rc));
7230 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrReadPhys;
7231 return rc;
7232 }
7233
7234 /*
7235 * We are allowed to cache VMCS related data structures (such as I/O bitmaps, MSR bitmaps)
7236 * while entering VMX non-root mode. We do some of this while checking VM-execution
7237 * controls. The guest hypervisor should not make assumptions and cannot expect
7238 * predictable behavior if changes to these structures are made in guest memory while
7239 * executing in VMX non-root mode. As far as VirtualBox is concerned, the guest cannot
7240 * modify them anyway as we cache them in host memory. We trade memory for speed here.
7241 *
7242 * See Intel spec. 24.11.4 "Software Access to Related Structures".
7243 */
7244 rc = iemVmxVmentryCheckExecCtls(pVCpu, pszInstr);
7245 if (RT_SUCCESS(rc))
7246 {
7247 rc = iemVmxVmentryCheckExitCtls(pVCpu, pszInstr);
7248 if (RT_SUCCESS(rc))
7249 {
7250 rc = iemVmxVmentryCheckEntryCtls(pVCpu, pszInstr);
7251 if (RT_SUCCESS(rc))
7252 {
7253 rc = iemVmxVmentryCheckHostState(pVCpu, pszInstr);
7254 if (RT_SUCCESS(rc))
7255 {
7256 /* Initialize the VM-exit qualification field as it MBZ for VM-exits where it isn't specified. */
7257 iemVmxVmcsSetExitQual(pVCpu, 0);
7258
7259 /*
7260 * Blocking of NMIs need to be restored if VM-entry fails due to invalid-guest state.
7261 * So we save the required force flags here (currently only VMCPU_FF_BLOCK_NMI) so we
7262 * can restore it on VM-exit when required.
7263 */
7264 iemVmxVmentrySaveForceFlags(pVCpu);
7265
7266 rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
7267 if (RT_SUCCESS(rc))
7268 {
7269 rc = iemVmxVmentryLoadGuestState(pVCpu, pszInstr);
7270 if (RT_SUCCESS(rc))
7271 {
7272 rc = iemVmxVmentryLoadGuestAutoMsrs(pVCpu, pszInstr);
7273 if (RT_SUCCESS(rc))
7274 {
7275 Assert(rc != VINF_CPUM_R3_MSR_WRITE);
7276
7277 /* VMLAUNCH instruction must update the VMCS launch state. */
7278 if (uInstrId == VMXINSTRID_VMLAUNCH)
7279 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = VMX_V_VMCS_STATE_LAUNCHED;
7280
7281 /* Perform the VMX transition (PGM updates). */
7282 VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
7283 if (rcStrict == VINF_SUCCESS)
7284 { /* likely */ }
7285 else if (RT_SUCCESS(rcStrict))
7286 {
7287 Log3(("%s: iemVmxWorldSwitch returns %Rrc -> Setting passup status\n", pszInstr,
7288 VBOXSTRICTRC_VAL(rcStrict)));
7289 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7290 }
7291 else
7292 {
7293 Log3(("%s: iemVmxWorldSwitch failed! rc=%Rrc\n", pszInstr, VBOXSTRICTRC_VAL(rcStrict)));
7294 return rcStrict;
7295 }
7296
7297 /* We've now entered nested-guest execution. */
7298 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = true;
7299
7300 /*
7301 * The priority of potential VM-exits during VM-entry is important.
7302 * The priorities of VM-exits and events are listed from highest
7303 * to lowest as follows:
7304 *
7305 * 1. Event injection.
7306 * 2. Trap on task-switch (T flag set in TSS).
7307 * 3. TPR below threshold / APIC-write.
7308 * 4. SMI, INIT.
7309 * 5. MTF exit.
7310 * 6. Debug-trap exceptions (EFLAGS.TF), pending debug exceptions.
7311 * 7. VMX-preemption timer.
7312 * 8. NMI-window exit.
7313 * 9. NMI injection.
7314 * 10. Interrupt-window exit.
7315 * 11. Virtual-interrupt injection.
7316 * 12. Interrupt injection.
7317 * 13. Process next instruction (fetch, decode, execute).
7318 */
7319
7320 /* Setup the VMX-preemption timer. */
7321 iemVmxVmentrySetupPreemptTimer(pVCpu, pszInstr);
7322
7323 /* Setup monitor-trap flag. */
7324 iemVmxVmentrySetupMtf(pVCpu, pszInstr);
7325
7326 /* Now that we've switched page tables, we can inject events if any. */
7327 iemVmxVmentryInjectEvent(pVCpu, pszInstr);
7328
7329 return VINF_SUCCESS;
7330 }
7331 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_MSR_LOAD | VMX_EXIT_REASON_ENTRY_FAILED);
7332 }
7333 }
7334 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_INVALID_GUEST_STATE | VMX_EXIT_REASON_ENTRY_FAILED);
7335 }
7336
7337 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_HOST_STATE);
7338 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7339 return VINF_SUCCESS;
7340 }
7341 }
7342 }
7343
7344 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_CTLS);
7345 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7346 IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET(pVCpu, pszInstr);
7347# endif
7348}
7349
7350
7351/**
7352 * Checks whether an RDMSR or WRMSR instruction for the given MSR is intercepted
7353 * (causes a VM-exit) or not.
7354 *
7355 * @returns @c true if the instruction is intercepted, @c false otherwise.
7356 * @param pVCpu The cross context virtual CPU structure.
7357 * @param uExitReason The VM-exit exit reason (VMX_EXIT_RDMSR or
7358 * VMX_EXIT_WRMSR).
7359 * @param idMsr The MSR.
7360 */
7361IEM_STATIC bool iemVmxIsRdmsrWrmsrInterceptSet(PVMCPU pVCpu, uint32_t uExitReason, uint32_t idMsr)
7362{
7363 Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
7364 Assert( uExitReason == VMX_EXIT_RDMSR
7365 || uExitReason == VMX_EXIT_WRMSR);
7366
7367 /* Consult the MSR bitmap if the feature is supported. */
7368 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
7369 Assert(pVmcs);
7370 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
7371 {
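        /* The 4K MSR bitmap consists of four 1K regions: read bitmaps for the low MSR range
           (00000000h-00001FFFh) and the high MSR range (C0000000h-C0001FFFh), followed by the
           corresponding write bitmaps. */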
7372 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap));
7373 if (uExitReason == VMX_EXIT_RDMSR)
7374 {
7375 VMXMSREXITREAD enmRead;
7376 int rc = HMVmxGetMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr, &enmRead,
7377 NULL /* penmWrite */);
7378 AssertRC(rc);
7379 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
7380 return true;
7381 }
7382 else
7383 {
7384 VMXMSREXITWRITE enmWrite;
7385 int rc = HMVmxGetMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr, NULL /* penmRead */,
7386 &enmWrite);
7387 AssertRC(rc);
7388 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
7389 return true;
7390 }
7391 return false;
7392 }
7393
7394 /* Without MSR bitmaps, all MSR accesses are intercepted. */
7395 return true;
7396}
7397
7398
7399/**
7400 * Checks whether a VMREAD or VMWRITE instruction for the given VMCS field is
7401 * intercepted (causes a VM-exit) or not.
7402 *
7403 * @returns @c true if the instruction is intercepted, @c false otherwise.
7404 * @param pVCpu The cross context virtual CPU structure.
7405 * @param u64FieldEnc The VMCS field encoding.
7406 * @param uExitReason The VM-exit exit reason (VMX_EXIT_VMREAD or
7407 * VMX_EXIT_VMWRITE).
7408 */
7409IEM_STATIC bool iemVmxIsVmreadVmwriteInterceptSet(PVMCPU pVCpu, uint32_t uExitReason, uint64_t u64FieldEnc)
7410{
7411 Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
7412 Assert( uExitReason == VMX_EXIT_VMREAD
7413 || uExitReason == VMX_EXIT_VMWRITE);
7414
7415 /* Without VMCS shadowing, all VMREAD and VMWRITE instructions are intercepted. */
7416 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing)
7417 return true;
7418
7419 /*
7420 * If any reserved bit in the 64-bit VMCS field encoding is set, the VMREAD/VMWRITE is intercepted.
7421 * This excludes any reserved bits in the valid parts of the field encoding (i.e. bit 12).
7422 */
7423 if (u64FieldEnc & VMX_VMCS_ENC_RSVD_MASK)
7424 return true;
7425
7426 /* Finally, consult the VMREAD/VMWRITE bitmap whether to intercept the instruction or not. */
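    /* Worked example (illustrative): for the read-only exit-reason field, encoding 0x4402,
       the byte offset computed below is 0x4402 >> 3 = 0x880 and the bit tested is
       0x4402 & 7 = 2; if that bit is set in the relevant bitmap, the access causes a VM-exit. */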
7427 uint32_t u32FieldEnc = RT_LO_U32(u64FieldEnc);
7428 Assert(u32FieldEnc >> 3 < VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
7429 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap));
7430 uint8_t const *pbBitmap = uExitReason == VMX_EXIT_VMREAD
7431 ? (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap)
7432 : (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap);
7433 pbBitmap += (u32FieldEnc >> 3);
7434 if (*pbBitmap & RT_BIT(u32FieldEnc & 7))
7435 return true;
7436
7437 return false;
7438}
7439
7440
7441/**
7442 * VMREAD common (memory/register) instruction execution worker.
7443 *
7444 * @returns Strict VBox status code.
7445 * @param pVCpu The cross context virtual CPU structure.
7446 * @param cbInstr The instruction length in bytes.
7447 * @param pu64Dst Where to write the VMCS value (only updated when
7448 * VINF_SUCCESS is returned).
7449 * @param u64FieldEnc The VMCS field encoding.
7450 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7451 * be NULL.
7452 */
7453IEM_STATIC VBOXSTRICTRC iemVmxVmreadCommon(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
7454 PCVMXVEXITINFO pExitInfo)
7455{
7456 /* Nested-guest intercept. */
7457 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7458 && iemVmxIsVmreadVmwriteInterceptSet(pVCpu, VMX_EXIT_VMREAD, u64FieldEnc))
7459 {
7460 if (pExitInfo)
7461 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7462 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMREAD, VMXINSTRID_VMREAD, cbInstr);
7463 }
7464
7465 /* CPL. */
7466 if (pVCpu->iem.s.uCpl > 0)
7467 {
7468 Log(("vmread: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7469 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_Cpl;
7470 return iemRaiseGeneralProtectionFault0(pVCpu);
7471 }
7472
7473 /* VMCS pointer in root mode. */
7474 if ( IEM_VMX_IS_ROOT_MODE(pVCpu)
7475 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
7476 {
7477 Log(("vmread: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
7478 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrInvalid;
7479 iemVmxVmFailInvalid(pVCpu);
7480 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7481 return VINF_SUCCESS;
7482 }
7483
7484 /* VMCS-link pointer in non-root mode. */
7485 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7486 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
7487 {
7488 Log(("vmread: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
7489 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_LinkPtrInvalid;
7490 iemVmxVmFailInvalid(pVCpu);
7491 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7492 return VINF_SUCCESS;
7493 }
7494
7495 /* Supported VMCS field. */
7496 if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
7497 {
7498 Log(("vmread: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
7499 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_FieldInvalid;
7500 iemVmxVmFail(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT);
7501 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7502 return VINF_SUCCESS;
7503 }
7504
7505 /*
7506 * Setup reading from the current or shadow VMCS.
7507 */
7508 uint8_t *pbVmcs;
7509 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7510 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
7511 else
7512 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
7513 Assert(pbVmcs);
7514
7515 VMXVMCSFIELDENC FieldEnc;
7516 FieldEnc.u = RT_LO_U32(u64FieldEnc);
7517 uint8_t const uWidth = FieldEnc.n.u2Width;
7518 uint8_t const uType = FieldEnc.n.u2Type;
7519 uint8_t const uWidthType = (uWidth << 2) | uType;
7520 uint8_t const uIndex = FieldEnc.n.u8Index;
7521 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
7522 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
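    /* Illustrative note: uWidthType packs the 2-bit width (bits 14:13 of the encoding) and the
       2-bit type (bits 11:10) into a 0..15 row index; g_aoffVmcsMap[row][index] then yields the
       byte offset of the field within our virtual VMCS layout. */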
7523
7524 /*
7525 * Read the VMCS component based on the field's effective width.
7526 *
7527 * The effective width of 64-bit fields is adjusted to 32 bits if the access-type
7528 * indicates the high part of the field (little endian).
7529 *
7530 * Note! The caller is responsible to trim the result and update registers
7531 * or memory locations as required. Here we just zero-extend to the largest
7532 * type (i.e. 64-bits).
7533 */
7534 uint8_t *pbField = pbVmcs + offField;
7535 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
7536 switch (uEffWidth)
7537 {
7538 case VMX_VMCS_ENC_WIDTH_64BIT:
7539 case VMX_VMCS_ENC_WIDTH_NATURAL: *pu64Dst = *(uint64_t *)pbField; break;
7540 case VMX_VMCS_ENC_WIDTH_32BIT: *pu64Dst = *(uint32_t *)pbField; break;
7541 case VMX_VMCS_ENC_WIDTH_16BIT: *pu64Dst = *(uint16_t *)pbField; break;
7542 }
7543 return VINF_SUCCESS;
7544}
7545
7546
7547/**
7548 * VMREAD (64-bit register) instruction execution worker.
7549 *
7550 * @returns Strict VBox status code.
7551 * @param pVCpu The cross context virtual CPU structure.
7552 * @param cbInstr The instruction length in bytes.
7553 * @param pu64Dst Where to store the VMCS field's value.
7554 * @param u64FieldEnc The VMCS field encoding.
7555 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7556 * be NULL.
7557 */
7558IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg64(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
7559 PCVMXVEXITINFO pExitInfo)
7560{
7561 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
7562 if (rcStrict == VINF_SUCCESS)
7563 {
7564 iemVmxVmreadSuccess(pVCpu, cbInstr);
7565 return VINF_SUCCESS;
7566 }
7567
7568 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7569 return rcStrict;
7570}
7571
7572
7573/**
7574 * VMREAD (32-bit register) instruction execution worker.
7575 *
7576 * @returns Strict VBox status code.
7577 * @param pVCpu The cross context virtual CPU structure.
7578 * @param cbInstr The instruction length in bytes.
7579 * @param pu32Dst Where to store the VMCS field's value.
7580 * @param u32FieldEnc The VMCS field encoding.
7581 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7582 * be NULL.
7583 */
7584IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg32(PVMCPU pVCpu, uint8_t cbInstr, uint32_t *pu32Dst, uint64_t u32FieldEnc,
7585 PCVMXVEXITINFO pExitInfo)
7586{
7587 uint64_t u64Dst;
7588 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u32FieldEnc, pExitInfo);
7589 if (rcStrict == VINF_SUCCESS)
7590 {
7591 *pu32Dst = u64Dst;
7592 iemVmxVmreadSuccess(pVCpu, cbInstr);
7593 return VINF_SUCCESS;
7594 }
7595
7596 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7597 return rcStrict;
7598}
7599
7600
7601/**
7602 * VMREAD (memory) instruction execution worker.
7603 *
7604 * @returns Strict VBox status code.
7605 * @param pVCpu The cross context virtual CPU structure.
7606 * @param cbInstr The instruction length in bytes.
7607 * @param iEffSeg The effective segment register to use with @a GCPtrDst.
7609 * @param enmEffAddrMode The effective addressing mode (only used with memory
7610 * operand).
7611 * @param GCPtrDst The guest linear address to store the VMCS field's
7612 * value.
7613 * @param u64FieldEnc The VMCS field encoding.
7614 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7615 * be NULL.
7616 */
7617IEM_STATIC VBOXSTRICTRC iemVmxVmreadMem(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode,
7618 RTGCPTR GCPtrDst, uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
7619{
7620 uint64_t u64Dst;
7621 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u64FieldEnc, pExitInfo);
7622 if (rcStrict == VINF_SUCCESS)
7623 {
7624 /*
7625 * Write the VMCS field's value to the location specified in guest-memory.
7626 *
7627 * The pointer size depends on the address size (address-size prefix allowed).
7628 * The operand size depends on IA-32e mode (operand-size prefix not allowed).
7629 */
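        /* E.g. with a 16-bit effective address size only the low 16 bits of GCPtrDst are used;
           outside 64-bit mode the value stored below is truncated to 32 bits. */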
7630 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
7631 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
7632 GCPtrDst &= s_auAddrSizeMasks[enmEffAddrMode];
7633
7634 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7635 rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrDst, u64Dst);
7636 else
7637 rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrDst, u64Dst);
7638 if (rcStrict == VINF_SUCCESS)
7639 {
7640 iemVmxVmreadSuccess(pVCpu, cbInstr);
7641 return VINF_SUCCESS;
7642 }
7643
7644 Log(("vmread/mem: Failed to write to memory operand at %#RGv, rc=%Rrc\n", GCPtrDst, VBOXSTRICTRC_VAL(rcStrict)));
7645 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrMap;
7646 return rcStrict;
7647 }
7648
7649 Log(("vmread/mem: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7650 return rcStrict;
7651}
7652
7653
7654/**
7655 * VMWRITE instruction execution worker.
7656 *
7657 * @returns Strict VBox status code.
7658 * @param pVCpu The cross context virtual CPU structure.
7659 * @param cbInstr The instruction length in bytes.
7660 * @param iEffSeg The effective segment register to use with @a u64Val.
7661 * Pass UINT8_MAX if it is a register access.
7662 * @param enmEffAddrMode The effective addressing mode (only used with memory
7663 * operand).
7664 * @param u64Val The value to write (or guest linear address to the
7665 * value), @a iEffSeg will indicate if it's a memory
7666 * operand.
7667 * @param u64FieldEnc The VMCS field encoding.
7668 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7669 * be NULL.
7670 */
7671IEM_STATIC VBOXSTRICTRC iemVmxVmwrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode, uint64_t u64Val,
7672 uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
7673{
7674 /* Nested-guest intercept. */
7675 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7676 && iemVmxIsVmreadVmwriteInterceptSet(pVCpu, VMX_EXIT_VMWRITE, u64FieldEnc))
7677 {
7678 if (pExitInfo)
7679 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7680 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMWRITE, VMXINSTRID_VMWRITE, cbInstr);
7681 }
7682
7683 /* CPL. */
7684 if (pVCpu->iem.s.uCpl > 0)
7685 {
7686 Log(("vmwrite: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7687 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_Cpl;
7688 return iemRaiseGeneralProtectionFault0(pVCpu);
7689 }
7690
7691 /* VMCS pointer in root mode. */
7692 if ( IEM_VMX_IS_ROOT_MODE(pVCpu)
7693 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
7694 {
7695 Log(("vmwrite: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
7696 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrInvalid;
7697 iemVmxVmFailInvalid(pVCpu);
7698 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7699 return VINF_SUCCESS;
7700 }
7701
7702 /* VMCS-link pointer in non-root mode. */
7703 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7704 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
7705 {
7706 Log(("vmwrite: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
7707 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_LinkPtrInvalid;
7708 iemVmxVmFailInvalid(pVCpu);
7709 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7710 return VINF_SUCCESS;
7711 }
7712
7713 /* If the VMWRITE instruction references memory, access the specified memory operand. */
7714 bool const fIsRegOperand = iEffSeg == UINT8_MAX;
7715 if (!fIsRegOperand)
7716 {
7717 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
7718 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
7719 RTGCPTR const GCPtrVal = u64Val & s_auAddrSizeMasks[enmEffAddrMode];
7720
7721 /* Read the value from the specified guest memory location. */
7722 VBOXSTRICTRC rcStrict;
7723 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7724 rcStrict = iemMemFetchDataU64(pVCpu, &u64Val, iEffSeg, GCPtrVal);
7725 else
7726 {
7727 uint32_t u32Val;
7728 rcStrict = iemMemFetchDataU32(pVCpu, &u32Val, iEffSeg, GCPtrVal);
7729 u64Val = u32Val;
7730 }
7731 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
7732 {
7733 Log(("vmwrite: Failed to read value from memory operand at %#RGv, rc=%Rrc\n", GCPtrVal, VBOXSTRICTRC_VAL(rcStrict)));
7734 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrMap;
7735 return rcStrict;
7736 }
7737 }
7738 else
7739 Assert(!pExitInfo || pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand);
7740
7741 /* Supported VMCS field. */
7742 if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
7743 {
7744 Log(("vmwrite: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
7745 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldInvalid;
7746 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_INVALID_COMPONENT);
7747 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7748 return VINF_SUCCESS;
7749 }
7750
7751 /* Read-only VMCS field. */
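    /* Illustrative note: fVmxVmwriteAll is this implementation's view of the "VMWRITE to any
       supported VMCS field" capability (reported via IA32_VMX_MISC bit 29 per the Intel spec);
       when it is absent, a write to a read-only (VM-exit information) field must VMfail below. */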
7752 bool const fIsFieldReadOnly = HMVmxIsVmcsFieldReadOnly(u64FieldEnc);
7753 if ( fIsFieldReadOnly
7754 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
7755 {
7756 Log(("vmwrite: Write to read-only VMCS component %#RX64 -> VMFail\n", u64FieldEnc));
7757 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldRo;
7758 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_RO_COMPONENT);
7759 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7760 return VINF_SUCCESS;
7761 }
7762
7763 /*
7764 * Setup writing to the current or shadow VMCS.
7765 */
7766 uint8_t *pbVmcs;
7767 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7768 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
7769 else
7770 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
7771 Assert(pbVmcs);
7772
7773 VMXVMCSFIELDENC FieldEnc;
7774 FieldEnc.u = RT_LO_U32(u64FieldEnc);
7775 uint8_t const uWidth = FieldEnc.n.u2Width;
7776 uint8_t const uType = FieldEnc.n.u2Type;
7777 uint8_t const uWidthType = (uWidth << 2) | uType;
7778 uint8_t const uIndex = FieldEnc.n.u8Index;
7779 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
7780 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
7781
7782 /*
7783 * Write the VMCS component based on the field's effective width.
7784 *
7785 * The effective width of 64-bit fields is adjusted to 32 bits if the access-type
7786 * indicates the high part of the field (little endian).
7787 */
7788 uint8_t *pbField = pbVmcs + offField;
7789 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
7790 switch (uEffWidth)
7791 {
7792 case VMX_VMCS_ENC_WIDTH_64BIT:
7793 case VMX_VMCS_ENC_WIDTH_NATURAL: *(uint64_t *)pbField = u64Val; break;
7794 case VMX_VMCS_ENC_WIDTH_32BIT: *(uint32_t *)pbField = u64Val; break;
7795 case VMX_VMCS_ENC_WIDTH_16BIT: *(uint16_t *)pbField = u64Val; break;
7796 }
7797
7798 iemVmxVmSucceed(pVCpu);
7799 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7800 return VINF_SUCCESS;
7801}
7802
7803
7804/**
7805 * VMCLEAR instruction execution worker.
7806 *
7807 * @returns Strict VBox status code.
7808 * @param pVCpu The cross context virtual CPU structure.
7809 * @param cbInstr The instruction length in bytes.
7810 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
7811 * @param GCPtrVmcs The linear address of the VMCS pointer.
7812 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7813 * be NULL.
7814 *
7815 * @remarks Common VMX instruction checks are already expected to be done by the caller,
7816 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
7817 */
7818IEM_STATIC VBOXSTRICTRC iemVmxVmclear(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
7819 PCVMXVEXITINFO pExitInfo)
7820{
7821 /* Nested-guest intercept. */
7822 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7823 {
7824 if (pExitInfo)
7825 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7826 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMCLEAR, VMXINSTRID_NONE, cbInstr);
7827 }
7828
7829 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
7830
7831 /* CPL. */
7832 if (pVCpu->iem.s.uCpl > 0)
7833 {
7834 Log(("vmclear: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7835 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_Cpl;
7836 return iemRaiseGeneralProtectionFault0(pVCpu);
7837 }
7838
7839 /* Get the VMCS pointer from the location specified by the source memory operand. */
7840 RTGCPHYS GCPhysVmcs;
7841 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
7842 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
7843 {
7844 Log(("vmclear: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
7845 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrMap;
7846 return rcStrict;
7847 }
7848
7849 /* VMCS pointer alignment. */
7850 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
7851 {
7852 Log(("vmclear: VMCS pointer not page-aligned -> VMFail()\n"));
7853 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAlign;
7854 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
7855 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7856 return VINF_SUCCESS;
7857 }
7858
7859 /* VMCS physical-address width limits. */
7860 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
7861 {
7862 Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
7863 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrWidth;
7864 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
7865 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7866 return VINF_SUCCESS;
7867 }
7868
7869 /* VMCS is not the VMXON region. */
7870 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
7871 {
7872 Log(("vmclear: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
7873 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrVmxon;
7874 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_VMXON_PTR);
7875 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7876 return VINF_SUCCESS;
7877 }
7878
7879 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
7880 restriction imposed by our implementation. */
7881 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
7882 {
7883 Log(("vmclear: VMCS not normal memory -> VMFail()\n"));
7884 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAbnormal;
7885 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
7886 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7887 return VINF_SUCCESS;
7888 }
7889
7890 /*
7891 * VMCLEAR allows committing and clearing any valid VMCS pointer.
7892 *
7893 * If the current VMCS is the one being cleared, set its state to 'clear' and commit
7894 * to guest memory. Otherwise, set the state of the VMCS referenced in guest memory
7895 * to 'clear'.
7896 */
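    /* Per the Intel spec, VMLAUNCH requires the current VMCS launch state to be 'clear' while
       VMRESUME requires it to be 'launched'; this is why a guest normally executes VMCLEAR on
       a VMCS before using it with VMLAUNCH for the first time. */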
7897 uint8_t const fVmcsStateClear = VMX_V_VMCS_STATE_CLEAR;
7898 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) == GCPhysVmcs)
7899 {
7900 Assert(GCPhysVmcs != NIL_RTGCPHYS); /* Paranoia. */
7901 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
7902 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = fVmcsStateClear;
7903 iemVmxCommitCurrentVmcsToMemory(pVCpu);
7904 Assert(!IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
7905 }
7906 else
7907 {
7908 AssertCompileMemberSize(VMXVVMCS, fVmcsState, sizeof(fVmcsStateClear));
7909 rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + RT_UOFFSETOF(VMXVVMCS, fVmcsState),
7910 (const void *)&fVmcsStateClear, sizeof(fVmcsStateClear));
7911 if (RT_FAILURE(rcStrict))
7912 return rcStrict;
7913 }
7914
7915 iemVmxVmSucceed(pVCpu);
7916 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7917 return VINF_SUCCESS;
7918}
7919
7920
7921/**
7922 * VMPTRST instruction execution worker.
7923 *
7924 * @returns Strict VBox status code.
7925 * @param pVCpu The cross context virtual CPU structure.
7926 * @param cbInstr The instruction length in bytes.
7927 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
7928 * @param GCPtrVmcs The linear address of where to store the current VMCS
7929 * pointer.
7930 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7931 * be NULL.
7932 *
7933 * @remarks Common VMX instruction checks are already expected to be done by the caller,
7934 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
7935 */
7936IEM_STATIC VBOXSTRICTRC iemVmxVmptrst(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
7937 PCVMXVEXITINFO pExitInfo)
7938{
7939 /* Nested-guest intercept. */
7940 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7941 {
7942 if (pExitInfo)
7943 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7944 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMPTRST, VMXINSTRID_NONE, cbInstr);
7945 }
7946
7947 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
7948
7949 /* CPL. */
7950 if (pVCpu->iem.s.uCpl > 0)
7951 {
7952 Log(("vmptrst: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7953 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_Cpl;
7954 return iemRaiseGeneralProtectionFault0(pVCpu);
7955 }
7956
7957 /* Set the VMCS pointer to the location specified by the destination memory operand. */
7958 AssertCompile(NIL_RTGCPHYS == ~(RTGCPHYS)0U);
7959 VBOXSTRICTRC rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrVmcs, IEM_VMX_GET_CURRENT_VMCS(pVCpu));
7960 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7961 {
7962 iemVmxVmSucceed(pVCpu);
7963 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7964 return rcStrict;
7965 }
7966
7967 Log(("vmptrst: Failed to store VMCS pointer to memory at destination operand %#Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7968 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_PtrMap;
7969 return rcStrict;
7970}
7971
7972
7973/**
7974 * VMPTRLD instruction execution worker.
7975 *
7976 * @returns Strict VBox status code.
7977 * @param pVCpu The cross context virtual CPU structure.
7978 * @param cbInstr The instruction length in bytes.
* @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
7979 * @param GCPtrVmcs The linear address of the current VMCS pointer.
7980 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7981 * be NULL.
7982 *
7983 * @remarks Common VMX instruction checks are already expected to be done by the caller,
7984 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
7985 */
7986IEM_STATIC VBOXSTRICTRC iemVmxVmptrld(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
7987 PCVMXVEXITINFO pExitInfo)
7988{
7989 /* Nested-guest intercept. */
7990 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7991 {
7992 if (pExitInfo)
7993 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7994 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMPTRLD, VMXINSTRID_NONE, cbInstr);
7995 }
7996
7997 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
7998
7999 /* CPL. */
8000 if (pVCpu->iem.s.uCpl > 0)
8001 {
8002 Log(("vmptrld: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
8003 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_Cpl;
8004 return iemRaiseGeneralProtectionFault0(pVCpu);
8005 }
8006
8007 /* Get the VMCS pointer from the location specified by the source memory operand. */
8008 RTGCPHYS GCPhysVmcs;
8009 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
8010 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
8011 {
8012 Log(("vmptrld: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
8013 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrMap;
8014 return rcStrict;
8015 }
8016
8017 /* VMCS pointer alignment. */
8018 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
8019 {
8020 Log(("vmptrld: VMCS pointer not page-aligned -> VMFail()\n"));
8021 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAlign;
8022 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
8023 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8024 return VINF_SUCCESS;
8025 }
8026
8027 /* VMCS physical-address width limits. */
8028 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
8029 {
8030 Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
8031 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrWidth;
8032 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
8033 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8034 return VINF_SUCCESS;
8035 }
8036
8037 /* VMCS is not the VMXON region. */
8038 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
8039 {
8040 Log(("vmptrld: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
8041 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrVmxon;
8042 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_VMXON_PTR);
8043 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8044 return VINF_SUCCESS;
8045 }
8046
8047 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
8048 restriction imposed by our implementation. */
8049 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
8050 {
8051 Log(("vmptrld: VMCS not normal memory -> VMFail()\n"));
8052 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAbnormal;
8053 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
8054 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8055 return VINF_SUCCESS;
8056 }
8057
8058 /* Read the VMCS revision ID from the VMCS. */
8059 VMXVMCSREVID VmcsRevId;
8060 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmcs, sizeof(VmcsRevId));
8061 if (RT_FAILURE(rc))
8062 {
8063 Log(("vmptrld: Failed to read VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
8064 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrReadPhys;
8065 return rc;
8066 }
8067
8068 /* Verify the VMCS revision specified by the guest matches what we reported to the guest,
8069 also check VMCS shadowing feature. */
8070 if ( VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID
8071 || ( VmcsRevId.n.fIsShadowVmcs
8072 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
8073 {
8074 if (VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID)
8075 {
8076 Log(("vmptrld: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFail()\n", VMX_V_VMCS_REVISION_ID,
8077 VmcsRevId.n.u31RevisionId));
8078 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_VmcsRevId;
8079 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
8080 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8081 return VINF_SUCCESS;
8082 }
8083
8084 Log(("vmptrld: Shadow VMCS -> VMFail()\n"));
8085 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_ShadowVmcs;
8086 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
8087 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8088 return VINF_SUCCESS;
8089 }
8090
8091 /*
8092 * We maintain only the cache of the current VMCS in CPUMCTX. Therefore, VMPTRLD shall
8093 * always flush the cache contents of any existing, current VMCS back to guest memory
8094 * before loading a new VMCS as current.
8095 */
8096 if ( IEM_VMX_HAS_CURRENT_VMCS(pVCpu)
8097 && IEM_VMX_GET_CURRENT_VMCS(pVCpu) != GCPhysVmcs)
8098 iemVmxCommitCurrentVmcsToMemory(pVCpu);
8099
8100 IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);
8101
8102 iemVmxVmSucceed(pVCpu);
8103 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8104 return VINF_SUCCESS;
8105}
8106
8107
8108/**
8109 * VMXON instruction execution worker.
8110 *
8111 * @returns Strict VBox status code.
8112 * @param pVCpu The cross context virtual CPU structure.
8113 * @param cbInstr The instruction length in bytes.
8114 * @param iEffSeg The effective segment register to use with @a
8115 * GCPtrVmxon.
8116 * @param GCPtrVmxon The linear address of the VMXON pointer.
8117 * @param pExitInfo Pointer to the VM-exit instruction information struct.
8118 * Optional, can be NULL.
8119 *
8120 * @remarks Common VMX instruction checks are already expected to be done by the caller,
8121 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
8122 */
8123IEM_STATIC VBOXSTRICTRC iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmxon,
8124 PCVMXVEXITINFO pExitInfo)
8125{
8126 if (!IEM_VMX_IS_ROOT_MODE(pVCpu))
8127 {
8128 /* CPL. */
8129 if (pVCpu->iem.s.uCpl > 0)
8130 {
8131 Log(("vmxon: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
8132 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cpl;
8133 return iemRaiseGeneralProtectionFault0(pVCpu);
8134 }
8135
8136 /* A20M (A20 Masked) mode. */
8137 if (!PGMPhysIsA20Enabled(pVCpu))
8138 {
8139 Log(("vmxon: A20M mode -> #GP(0)\n"));
8140 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_A20M;
8141 return iemRaiseGeneralProtectionFault0(pVCpu);
8142 }
8143
8144 /* CR0. */
8145 {
8146 /* CR0 MB1 bits. */
8147 uint64_t const uCr0Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
8148 if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) != uCr0Fixed0)
8149 {
8150 Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));
8151 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed0;
8152 return iemRaiseGeneralProtectionFault0(pVCpu);
8153 }
8154
8155 /* CR0 MBZ bits. */
8156 uint64_t const uCr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
8157 if (pVCpu->cpum.GstCtx.cr0 & ~uCr0Fixed1)
8158 {
8159 Log(("vmxon: CR0 fixed1 bits set -> #GP(0)\n"));
8160 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed1;
8161 return iemRaiseGeneralProtectionFault0(pVCpu);
8162 }
8163 }
8164
8165 /* CR4. */
8166 {
8167 /* CR4 MB1 bits. */
8168 uint64_t const uCr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
8169 if ((pVCpu->cpum.GstCtx.cr4 & uCr4Fixed0) != uCr4Fixed0)
8170 {
8171 Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));
8172 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed0;
8173 return iemRaiseGeneralProtectionFault0(pVCpu);
8174 }
8175
8176 /* CR4 MBZ bits. */
8177 uint64_t const uCr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
8178 if (pVCpu->cpum.GstCtx.cr4 & ~uCr4Fixed1)
8179 {
8180 Log(("vmxon: CR4 fixed1 bits set -> #GP(0)\n"));
8181 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed1;
8182 return iemRaiseGeneralProtectionFault0(pVCpu);
8183 }
8184 }
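        /* Illustrative note: the FIXEDx MSR semantics checked above follow the Intel spec --
           a bit that is 1 in CRx_FIXED0 must be 1 in CRx, and a bit that is 0 in CRx_FIXED1
           must be 0 in CRx; e.g. CR4.VMXE is 1 in CR4_FIXED0 and thus must already be set. */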
8185
8186 /* Feature control MSR's LOCK and VMXON bits. */
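        /* Illustrative note: these flags are assumed to correspond to IA32_FEATURE_CONTROL
           bit 0 (lock) and bit 2 (enable VMX outside SMX operation) per the Intel spec; both
           must be set for VMXON to be allowed here. */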
8187 uint64_t const uMsrFeatCtl = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64FeatCtrl;
8188 if ((uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
8189 != (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
8190 {
8191 Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
8192 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_MsrFeatCtl;
8193 return iemRaiseGeneralProtectionFault0(pVCpu);
8194 }
8195
8196 /* Get the VMXON pointer from the location specified by the source memory operand. */
8197 RTGCPHYS GCPhysVmxon;
8198 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, iEffSeg, GCPtrVmxon);
8199 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
8200 {
8201 Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));
8202 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrMap;
8203 return rcStrict;
8204 }
8205
8206 /* VMXON region pointer alignment. */
8207 if (GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK)
8208 {
8209 Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));
8210 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAlign;
8211 iemVmxVmFailInvalid(pVCpu);
8212 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8213 return VINF_SUCCESS;
8214 }
8215
8216 /* VMXON physical-address width limits. */
8217 if (GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
8218 {
8219 Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
8220 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrWidth;
8221 iemVmxVmFailInvalid(pVCpu);
8222 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8223 return VINF_SUCCESS;
8224 }
8225
8226 /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
8227 restriction imposed by our implementation. */
8228 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
8229 {
8230 Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));
8231 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAbnormal;
8232 iemVmxVmFailInvalid(pVCpu);
8233 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8234 return VINF_SUCCESS;
8235 }
8236
8237 /* Read the VMCS revision ID from the VMXON region. */
8238 VMXVMCSREVID VmcsRevId;
8239 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
8240 if (RT_FAILURE(rc))
8241 {
8242 Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
8243 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrReadPhys;
8244 return rc;
8245 }
8246
8247 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
8248 if (RT_UNLIKELY(VmcsRevId.u != VMX_V_VMCS_REVISION_ID))
8249 {
8250 /* Revision ID mismatch. */
8251 if (!VmcsRevId.n.fIsShadowVmcs)
8252 {
8253 Log(("vmxon: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFailInvalid\n", VMX_V_VMCS_REVISION_ID,
8254 VmcsRevId.n.u31RevisionId));
8255 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmcsRevId;
8256 iemVmxVmFailInvalid(pVCpu);
8257 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8258 return VINF_SUCCESS;
8259 }
8260
8261 /* Shadow VMCS disallowed. */
8262 Log(("vmxon: Shadow VMCS -> VMFailInvalid\n"));
8263 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_ShadowVmcs;
8264 iemVmxVmFailInvalid(pVCpu);
8265 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8266 return VINF_SUCCESS;
8267 }
8268
8269 /*
8270 * Record that we're in VMX operation, block INIT, block and disable A20M.
8271 */
8272 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon = GCPhysVmxon;
8273 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
8274 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = true;
8275
8276 /* Clear address-range monitoring. */
8277 EMMonitorWaitClear(pVCpu);
8278 /** @todo NSTVMX: Intel PT. */
8279
8280 iemVmxVmSucceed(pVCpu);
8281 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8282 return VINF_SUCCESS;
8283 }
8284 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8285 {
8286 /* Nested-guest intercept. */
8287 if (pExitInfo)
8288 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
8289 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMXON, VMXINSTRID_NONE, cbInstr);
8290 }
8291
8292 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
8293
8294 /* CPL. */
8295 if (pVCpu->iem.s.uCpl > 0)
8296 {
8297 Log(("vmxon: In VMX root mode: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
8298 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxRootCpl;
8299 return iemRaiseGeneralProtectionFault0(pVCpu);
8300 }
8301
8302 /* VMXON when already in VMX root mode. */
8303 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXON_IN_VMXROOTMODE);
8304 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxAlreadyRoot;
8305 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8306 return VINF_SUCCESS;
8307}
8308
8309
8310/**
8311 * Implements 'VMXOFF'.
8312 *
8313 * @remarks Common VMX instruction checks are already expected to be done by the caller,
8314 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
8315 */
8316IEM_CIMPL_DEF_0(iemCImpl_vmxoff)
8317{
8318 /* Nested-guest intercept. */
8319 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8320 return iemVmxVmexitInstr(pVCpu, VMX_EXIT_VMXOFF, cbInstr);
8321
8322 /* CPL. */
8323 if (pVCpu->iem.s.uCpl > 0)
8324 {
8325 Log(("vmxoff: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
8326 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxoff_Cpl;
8327 return iemRaiseGeneralProtectionFault0(pVCpu);
8328 }
8329
8330 /* Dual monitor treatment of SMIs and SMM. */
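    /* Illustrative note: the valid bit of IA32_SMM_MONITOR_CTL indicates an SMM monitor is
       configured; per the Intel spec VMXOFF then fails with "VMXOFF under dual-monitor
       treatment of SMIs and SMM" rather than silently leaving VMX operation. */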
8331 uint64_t const fSmmMonitorCtl = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
8332 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID)
8333 {
8334 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXOFF_DUAL_MON);
8335 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8336 return VINF_SUCCESS;
8337 }
8338
8339 /* Record that we're no longer in VMX root operation, unblock INIT, unblock and enable A20M. */
8340 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = false;
8341 Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode);
8342
8343 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VMXOFF_UNBLOCK_SMI)
8344 { /** @todo NSTVMX: Unblock SMI. */ }
8345
8346 EMMonitorWaitClear(pVCpu);
8347 /** @todo NSTVMX: Unblock and enable A20M. */
8348
8349 iemVmxVmSucceed(pVCpu);
8350 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8351 return VINF_SUCCESS;
8352}
8353
8354
8355/**
8356 * Implements 'VMXON'.
8357 */
8358IEM_CIMPL_DEF_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon)
8359{
8360 return iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, NULL /* pExitInfo */);
8361}
8362
8363
8364/**
8365 * Implements 'VMLAUNCH'.
8366 */
8367IEM_CIMPL_DEF_0(iemCImpl_vmlaunch)
8368{
8369 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMLAUNCH);
8370}
8371
8372
8373/**
8374 * Implements 'VMRESUME'.
8375 */
8376IEM_CIMPL_DEF_0(iemCImpl_vmresume)
8377{
8378 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMRESUME);
8379}
8380
8381
8382/**
8383 * Implements 'VMPTRLD'.
8384 */
8385IEM_CIMPL_DEF_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
8386{
8387 return iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
8388}
8389
8390
8391/**
8392 * Implements 'VMPTRST'.
8393 */
8394IEM_CIMPL_DEF_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
8395{
8396 return iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
8397}
8398
8399
8400/**
8401 * Implements 'VMCLEAR'.
8402 */
8403IEM_CIMPL_DEF_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
8404{
8405 return iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
8406}
8407
8408
8409/**
8410 * Implements 'VMWRITE' register.
8411 */
8412IEM_CIMPL_DEF_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint64_t, u64FieldEnc)
8413{
8414 return iemVmxVmwrite(pVCpu, cbInstr, UINT8_MAX /* iEffSeg */, IEMMODE_64BIT /* N/A */, u64Val, u64FieldEnc,
8415 NULL /* pExitInfo */);
8416}
8417
8418
8419/**
8420 * Implements 'VMWRITE' memory.
8421 */
8422IEM_CIMPL_DEF_4(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrVal, uint32_t, u64FieldEnc)
8423{
8424 return iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrVal, u64FieldEnc, NULL /* pExitInfo */);
8425}
8426
8427
8428/**
8429 * Implements 'VMREAD' 64-bit register.
8430 */
8431IEM_CIMPL_DEF_2(iemCImpl_vmread64_reg, uint64_t *, pu64Dst, uint64_t, u64FieldEnc)
8432{
8433 return iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, NULL /* pExitInfo */);
8434}
8435
8436
8437/**
8438 * Implements 'VMREAD' 32-bit register.
8439 */
8440IEM_CIMPL_DEF_2(iemCImpl_vmread32_reg, uint32_t *, pu32Dst, uint32_t, u32FieldEnc)
8441{
8442 return iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u32FieldEnc, NULL /* pExitInfo */);
8443}
8444
8445
8446/**
8447 * Implements 'VMREAD' memory.
8448 */
8449IEM_CIMPL_DEF_4(iemCImpl_vmread_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrDst, uint32_t, u64FieldEnc)
8450{
8451 return iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, u64FieldEnc, NULL /* pExitInfo */);
8452}
8453
8454
8455/**
8456 * Implements VMX's implementation of PAUSE.
8457 */
8458IEM_CIMPL_DEF_0(iemCImpl_vmx_pause)
8459{
8460 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8461 {
8462 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrPause(pVCpu, cbInstr);
8463 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
8464 return rcStrict;
8465 }
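    /* Illustrative note: per the Intel spec the intercept decision above covers two cases --
       the unconditional "PAUSE exiting" processor-based control, and at CPL 0 the
       "PAUSE-loop exiting" control with its PLE_Gap/PLE_Window heuristics; both are assumed
       to be handled by iemVmxVmexitInstrPause. */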
8466
8467 /*
8468 * Outside VMX non-root operation or if the PAUSE instruction does not cause
8469 * a VM-exit, the instruction operates normally.
8470 */
8471 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8472 return VINF_SUCCESS;
8473}
8474
8475#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
8476
8477
8478/**
8479 * Implements 'VMCALL'.
8480 */
8481IEM_CIMPL_DEF_0(iemCImpl_vmcall)
8482{
8483#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8484 /* Nested-guest intercept. */
8485 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8486 return iemVmxVmexitInstr(pVCpu, VMX_EXIT_VMCALL, cbInstr);
8487#endif
8488
8489 /* Join forces with vmmcall. */
8490 return IEM_CIMPL_CALL_1(iemCImpl_Hypercall, OP_VMCALL);
8491}
8492