VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@97281

Last change on this file since 97281 was 97281, checked in by vboxsync, 2 years ago

VMM/cpumctx.h: Set CPUMX86EFLAGS_HW_BITS to 24 as there seems to be no clear performance difference to 32. This should allow IEM and others to get away with more efficient encoding of RFLAGS/fIntInhibit updates later (see code comment).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 522.0 KB
 
1/* $Id: VMXAllTemplate.cpp.h 97281 2022-10-24 14:58:21Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
39#endif
40
41/** Enables condensing of VMREAD instructions, see vmxHCReadToTransient(). */
42#define HMVMX_WITH_CONDENSED_VMREADS
43
44/** Use the function table. */
45#define HMVMX_USE_FUNCTION_TABLE
46
47/** Determine which tagged-TLB flush handler to use. */
48#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
49#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
50#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
51#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
52
53/** Assert that all the given fields have been read from the VMCS. */
54#ifdef VBOX_STRICT
55# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
56 do { \
57 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead); \
58 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
59 } while (0)
60#else
61# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
62#endif
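/*
 * A hedged usage sketch (illustrative only): VM-exit handlers that rely on
 * vmxHCReadToTransient() typically document the fields they consume with this
 * macro, e.g.:
 *     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION
 *                                    | HMVMX_READ_EXIT_INSTR_LEN);
 * In non-strict builds it compiles to an empty statement, so the check costs
 * nothing at runtime.
 */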
63
64/**
65 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
66 * guest using hardware-assisted VMX.
67 *
68 * This excludes state like GPRs (other than RSP), which are always swapped
69 * and restored across the world-switch, and also registers like the EFER
70 * MSR which cannot be modified by the guest without causing a VM-exit.
71 */
72#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
73 | CPUMCTX_EXTRN_RFLAGS \
74 | CPUMCTX_EXTRN_RSP \
75 | CPUMCTX_EXTRN_SREG_MASK \
76 | CPUMCTX_EXTRN_TABLE_MASK \
77 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
78 | CPUMCTX_EXTRN_SYSCALL_MSRS \
79 | CPUMCTX_EXTRN_SYSENTER_MSRS \
80 | CPUMCTX_EXTRN_TSC_AUX \
81 | CPUMCTX_EXTRN_OTHER_MSRS \
82 | CPUMCTX_EXTRN_CR0 \
83 | CPUMCTX_EXTRN_CR3 \
84 | CPUMCTX_EXTRN_CR4 \
85 | CPUMCTX_EXTRN_DR7 \
86 | CPUMCTX_EXTRN_HWVIRT \
87 | CPUMCTX_EXTRN_INHIBIT_INT \
88 | CPUMCTX_EXTRN_INHIBIT_NMI)
89
90/**
91 * Exception bitmap mask for real-mode guests (real-on-v86).
92 *
93 * We need to intercept all exceptions manually except:
94 * - \#AC and \#DB are excluded because they are always intercepted anyway, to
95 * prevent the CPU from deadlocking due to bugs in Intel CPUs.
96 * - \#PF need not be intercepted even in real-mode if we have nested paging
97 * support.
98 */
99#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
100 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
101 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
102 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
103 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
104 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
105 | RT_BIT(X86_XCPT_XF))
106
107/** Maximum VM-instruction error number. */
108#define HMVMX_INSTR_ERROR_MAX 28
109
110/** Profiling macro. */
111#ifdef HM_PROFILE_EXIT_DISPATCH
112# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
113# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
114#else
115# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
116# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
117#endif
118
119#ifndef IN_NEM_DARWIN
120/** Assert that preemption is disabled or covered by thread-context hooks. */
121# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
122 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
123
124/** Assert that we haven't migrated CPUs when thread-context hooks are not
125 * used. */
126# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
127 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
128 ("Illegal migration! Entered on CPU %u Current %u\n", \
129 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
130#else
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
132# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
133#endif
134
135/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
136 * context. */
137#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
138 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
139 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
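/*
 * A hedged usage sketch (illustrative only): code about to consume a piece of
 * guest state asserts that the corresponding CPUMCTX_EXTRN_XXX bits are clear
 * in fExtrn, i.e. that the state is no longer held only in the VMCS, e.g.:
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS);
 * vmxHCSwitchToGstOrNstGstVmcs() below does this with HMVMX_CPUMCTX_EXTRN_ALL
 * before switching VMCSes.
 */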
140
141/** Log the VM-exit reason with an easily visible marker to identify it in a
142 * potential sea of logging data. */
143#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
144 do { \
145 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
146 HMGetVmxExitName(a_uExitReason))); \
147 } while (0) \
148
149
150/*********************************************************************************************************************************
151* Structures and Typedefs *
152*********************************************************************************************************************************/
153/**
154 * Memory operand read or write access.
155 */
156typedef enum VMXMEMACCESS
157{
158 VMXMEMACCESS_READ = 0,
159 VMXMEMACCESS_WRITE = 1
160} VMXMEMACCESS;
161
162
163/**
164 * VMX VM-exit handler.
165 *
166 * @returns Strict VBox status code (i.e. informational status codes too).
167 * @param pVCpu The cross context virtual CPU structure.
168 * @param pVmxTransient The VMX-transient structure.
169 */
170#ifndef HMVMX_USE_FUNCTION_TABLE
171typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
172#else
173typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
174/** Pointer to VM-exit handler. */
175typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
176#endif
177
178/**
179 * VMX VM-exit handler, non-strict status code.
180 *
181 * This is generally the same as FNVMXEXITHANDLER, the NSRC bit is just FYI.
182 *
183 * @returns VBox status code, no informational status code returned.
184 * @param pVCpu The cross context virtual CPU structure.
185 * @param pVmxTransient The VMX-transient structure.
186 *
187 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
188 * use of that status code will be replaced with VINF_EM_SOMETHING
189 * later when switching over to IEM.
190 */
191#ifndef HMVMX_USE_FUNCTION_TABLE
192typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
193#else
194typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
195#endif
196
197
198/*********************************************************************************************************************************
199* Internal Functions *
200*********************************************************************************************************************************/
201#ifndef HMVMX_USE_FUNCTION_TABLE
202DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
204# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
205#else
206# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
207# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
208#endif
209#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
210DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
211#endif
212
213static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
214
215/** @name VM-exit handler prototypes.
216 * @{
217 */
218static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
219static FNVMXEXITHANDLER vmxHCExitExtInt;
220static FNVMXEXITHANDLER vmxHCExitTripleFault;
221static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
222static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
223static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
224static FNVMXEXITHANDLER vmxHCExitCpuid;
225static FNVMXEXITHANDLER vmxHCExitGetsec;
226static FNVMXEXITHANDLER vmxHCExitHlt;
227static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
228static FNVMXEXITHANDLER vmxHCExitInvlpg;
229static FNVMXEXITHANDLER vmxHCExitRdpmc;
230static FNVMXEXITHANDLER vmxHCExitVmcall;
231#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
232static FNVMXEXITHANDLER vmxHCExitVmclear;
233static FNVMXEXITHANDLER vmxHCExitVmlaunch;
234static FNVMXEXITHANDLER vmxHCExitVmptrld;
235static FNVMXEXITHANDLER vmxHCExitVmptrst;
236static FNVMXEXITHANDLER vmxHCExitVmread;
237static FNVMXEXITHANDLER vmxHCExitVmresume;
238static FNVMXEXITHANDLER vmxHCExitVmwrite;
239static FNVMXEXITHANDLER vmxHCExitVmxoff;
240static FNVMXEXITHANDLER vmxHCExitVmxon;
241static FNVMXEXITHANDLER vmxHCExitInvvpid;
242# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
243static FNVMXEXITHANDLER vmxHCExitInvept;
244# endif
245#endif
246static FNVMXEXITHANDLER vmxHCExitRdtsc;
247static FNVMXEXITHANDLER vmxHCExitMovCRx;
248static FNVMXEXITHANDLER vmxHCExitMovDRx;
249static FNVMXEXITHANDLER vmxHCExitIoInstr;
250static FNVMXEXITHANDLER vmxHCExitRdmsr;
251static FNVMXEXITHANDLER vmxHCExitWrmsr;
252static FNVMXEXITHANDLER vmxHCExitMwait;
253static FNVMXEXITHANDLER vmxHCExitMtf;
254static FNVMXEXITHANDLER vmxHCExitMonitor;
255static FNVMXEXITHANDLER vmxHCExitPause;
256static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
257static FNVMXEXITHANDLER vmxHCExitApicAccess;
258static FNVMXEXITHANDLER vmxHCExitEptViolation;
259static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
260static FNVMXEXITHANDLER vmxHCExitRdtscp;
261static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
262static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
263static FNVMXEXITHANDLER vmxHCExitXsetbv;
264static FNVMXEXITHANDLER vmxHCExitInvpcid;
265#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
266static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
267#endif
268static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
269static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
270/** @} */
271
272#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
273/** @name Nested-guest VM-exit handler prototypes.
274 * @{
275 */
276static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
277static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
278static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
279static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
280static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
281static FNVMXEXITHANDLER vmxHCExitHltNested;
282static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
283static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
284static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
285static FNVMXEXITHANDLER vmxHCExitRdtscNested;
286static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
287static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
288static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
289static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
290static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
291static FNVMXEXITHANDLER vmxHCExitMwaitNested;
292static FNVMXEXITHANDLER vmxHCExitMtfNested;
293static FNVMXEXITHANDLER vmxHCExitMonitorNested;
294static FNVMXEXITHANDLER vmxHCExitPauseNested;
295static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
296static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
297static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
298static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
299static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
301static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
302static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
303static FNVMXEXITHANDLER vmxHCExitInstrNested;
304static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
305# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
306static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
307static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
308# endif
309/** @} */
310#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
311
312
313/*********************************************************************************************************************************
314* Global Variables *
315*********************************************************************************************************************************/
316#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
317/**
318 * Array of all VMCS fields.
319 * Any fields added to the VT-x spec. should be added here.
320 *
321 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
322 * of nested-guests.
323 */
324static const uint32_t g_aVmcsFields[] =
325{
326 /* 16-bit control fields. */
327 VMX_VMCS16_VPID,
328 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
329 VMX_VMCS16_EPTP_INDEX,
330 VMX_VMCS16_HLAT_PREFIX_SIZE,
331
332 /* 16-bit guest-state fields. */
333 VMX_VMCS16_GUEST_ES_SEL,
334 VMX_VMCS16_GUEST_CS_SEL,
335 VMX_VMCS16_GUEST_SS_SEL,
336 VMX_VMCS16_GUEST_DS_SEL,
337 VMX_VMCS16_GUEST_FS_SEL,
338 VMX_VMCS16_GUEST_GS_SEL,
339 VMX_VMCS16_GUEST_LDTR_SEL,
340 VMX_VMCS16_GUEST_TR_SEL,
341 VMX_VMCS16_GUEST_INTR_STATUS,
342 VMX_VMCS16_GUEST_PML_INDEX,
343
344 /* 16-bit host-state fields. */
345 VMX_VMCS16_HOST_ES_SEL,
346 VMX_VMCS16_HOST_CS_SEL,
347 VMX_VMCS16_HOST_SS_SEL,
348 VMX_VMCS16_HOST_DS_SEL,
349 VMX_VMCS16_HOST_FS_SEL,
350 VMX_VMCS16_HOST_GS_SEL,
351 VMX_VMCS16_HOST_TR_SEL,
352
353 /* 64-bit control fields. */
354 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
355 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
356 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
357 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
358 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
359 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
360 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
361 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
362 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
363 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
364 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
365 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
366 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
367 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
368 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
369 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
370 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
371 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
372 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
373 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
374 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
375 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
376 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
377 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
378 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
379 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
380 VMX_VMCS64_CTRL_EPTP_FULL,
381 VMX_VMCS64_CTRL_EPTP_HIGH,
382 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
383 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
384 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
385 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
386 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
387 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
388 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
389 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
390 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
391 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
392 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
393 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
394 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
395 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
396 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
397 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
398 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
399 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
400 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
401 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
402 VMX_VMCS64_CTRL_SPPTP_FULL,
403 VMX_VMCS64_CTRL_SPPTP_HIGH,
404 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
405 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
406 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
407 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
408 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
409 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
410 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_FULL,
411 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_HIGH,
412 VMX_VMCS64_CTRL_HLAT_PTR_FULL,
413 VMX_VMCS64_CTRL_HLAT_PTR_HIGH,
414 VMX_VMCS64_CTRL_EXIT2_FULL,
415 VMX_VMCS64_CTRL_EXIT2_HIGH,
416
417 /* 64-bit read-only data fields. */
418 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
419 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
420
421 /* 64-bit guest-state fields. */
422 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
423 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
424 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
425 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
426 VMX_VMCS64_GUEST_PAT_FULL,
427 VMX_VMCS64_GUEST_PAT_HIGH,
428 VMX_VMCS64_GUEST_EFER_FULL,
429 VMX_VMCS64_GUEST_EFER_HIGH,
430 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
431 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
432 VMX_VMCS64_GUEST_PDPTE0_FULL,
433 VMX_VMCS64_GUEST_PDPTE0_HIGH,
434 VMX_VMCS64_GUEST_PDPTE1_FULL,
435 VMX_VMCS64_GUEST_PDPTE1_HIGH,
436 VMX_VMCS64_GUEST_PDPTE2_FULL,
437 VMX_VMCS64_GUEST_PDPTE2_HIGH,
438 VMX_VMCS64_GUEST_PDPTE3_FULL,
439 VMX_VMCS64_GUEST_PDPTE3_HIGH,
440 VMX_VMCS64_GUEST_BNDCFGS_FULL,
441 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
442 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
443 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
444 VMX_VMCS64_GUEST_PKRS_FULL,
445 VMX_VMCS64_GUEST_PKRS_HIGH,
446
447 /* 64-bit host-state fields. */
448 VMX_VMCS64_HOST_PAT_FULL,
449 VMX_VMCS64_HOST_PAT_HIGH,
450 VMX_VMCS64_HOST_EFER_FULL,
451 VMX_VMCS64_HOST_EFER_HIGH,
452 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
453 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
454 VMX_VMCS64_HOST_PKRS_FULL,
455 VMX_VMCS64_HOST_PKRS_HIGH,
456
457 /* 32-bit control fields. */
458 VMX_VMCS32_CTRL_PIN_EXEC,
459 VMX_VMCS32_CTRL_PROC_EXEC,
460 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
461 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
462 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
463 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
464 VMX_VMCS32_CTRL_EXIT,
465 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
466 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
467 VMX_VMCS32_CTRL_ENTRY,
468 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
469 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
470 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
471 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
472 VMX_VMCS32_CTRL_TPR_THRESHOLD,
473 VMX_VMCS32_CTRL_PROC_EXEC2,
474 VMX_VMCS32_CTRL_PLE_GAP,
475 VMX_VMCS32_CTRL_PLE_WINDOW,
476
477 /* 32-bit read-only fields. */
478 VMX_VMCS32_RO_VM_INSTR_ERROR,
479 VMX_VMCS32_RO_EXIT_REASON,
480 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
481 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
482 VMX_VMCS32_RO_IDT_VECTORING_INFO,
483 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
484 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
485 VMX_VMCS32_RO_EXIT_INSTR_INFO,
486
487 /* 32-bit guest-state fields. */
488 VMX_VMCS32_GUEST_ES_LIMIT,
489 VMX_VMCS32_GUEST_CS_LIMIT,
490 VMX_VMCS32_GUEST_SS_LIMIT,
491 VMX_VMCS32_GUEST_DS_LIMIT,
492 VMX_VMCS32_GUEST_FS_LIMIT,
493 VMX_VMCS32_GUEST_GS_LIMIT,
494 VMX_VMCS32_GUEST_LDTR_LIMIT,
495 VMX_VMCS32_GUEST_TR_LIMIT,
496 VMX_VMCS32_GUEST_GDTR_LIMIT,
497 VMX_VMCS32_GUEST_IDTR_LIMIT,
498 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
499 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
500 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
501 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
502 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
503 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
504 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
505 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
506 VMX_VMCS32_GUEST_INT_STATE,
507 VMX_VMCS32_GUEST_ACTIVITY_STATE,
508 VMX_VMCS32_GUEST_SMBASE,
509 VMX_VMCS32_GUEST_SYSENTER_CS,
510 VMX_VMCS32_PREEMPT_TIMER_VALUE,
511
512 /* 32-bit host-state fields. */
513 VMX_VMCS32_HOST_SYSENTER_CS,
514
515 /* Natural-width control fields. */
516 VMX_VMCS_CTRL_CR0_MASK,
517 VMX_VMCS_CTRL_CR4_MASK,
518 VMX_VMCS_CTRL_CR0_READ_SHADOW,
519 VMX_VMCS_CTRL_CR4_READ_SHADOW,
520 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
521 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
522 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
523 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
524
525 /* Natural-width read-only data fields. */
526 VMX_VMCS_RO_EXIT_QUALIFICATION,
527 VMX_VMCS_RO_IO_RCX,
528 VMX_VMCS_RO_IO_RSI,
529 VMX_VMCS_RO_IO_RDI,
530 VMX_VMCS_RO_IO_RIP,
531 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
532
533 /* Natural-width guest-state fields */
534 VMX_VMCS_GUEST_CR0,
535 VMX_VMCS_GUEST_CR3,
536 VMX_VMCS_GUEST_CR4,
537 VMX_VMCS_GUEST_ES_BASE,
538 VMX_VMCS_GUEST_CS_BASE,
539 VMX_VMCS_GUEST_SS_BASE,
540 VMX_VMCS_GUEST_DS_BASE,
541 VMX_VMCS_GUEST_FS_BASE,
542 VMX_VMCS_GUEST_GS_BASE,
543 VMX_VMCS_GUEST_LDTR_BASE,
544 VMX_VMCS_GUEST_TR_BASE,
545 VMX_VMCS_GUEST_GDTR_BASE,
546 VMX_VMCS_GUEST_IDTR_BASE,
547 VMX_VMCS_GUEST_DR7,
548 VMX_VMCS_GUEST_RSP,
549 VMX_VMCS_GUEST_RIP,
550 VMX_VMCS_GUEST_RFLAGS,
551 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
552 VMX_VMCS_GUEST_SYSENTER_ESP,
553 VMX_VMCS_GUEST_SYSENTER_EIP,
554 VMX_VMCS_GUEST_S_CET,
555 VMX_VMCS_GUEST_SSP,
556 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
557
558 /* Natural-width host-state fields */
559 VMX_VMCS_HOST_CR0,
560 VMX_VMCS_HOST_CR3,
561 VMX_VMCS_HOST_CR4,
562 VMX_VMCS_HOST_FS_BASE,
563 VMX_VMCS_HOST_GS_BASE,
564 VMX_VMCS_HOST_TR_BASE,
565 VMX_VMCS_HOST_GDTR_BASE,
566 VMX_VMCS_HOST_IDTR_BASE,
567 VMX_VMCS_HOST_SYSENTER_ESP,
568 VMX_VMCS_HOST_SYSENTER_EIP,
569 VMX_VMCS_HOST_RSP,
570 VMX_VMCS_HOST_RIP,
571 VMX_VMCS_HOST_S_CET,
572 VMX_VMCS_HOST_SSP,
573 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
574};
575#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
576
577#ifdef HMVMX_USE_FUNCTION_TABLE
578/**
579 * VMX_EXIT dispatch table.
580 */
581static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
582{
583 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
584 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
585 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
586 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
587 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
588 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
589 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
590 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
591 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
592 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
593 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
594 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
595 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
596 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
597 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
598 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
599 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
600 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
601 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
602#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
603 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
604 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
605 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
606 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
607 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
608 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
609 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
610 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
611 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
612#else
613 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
614 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
615 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
616 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
617 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
618 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
619 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
620 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
621 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
622#endif
623 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
624 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
625 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
626 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
627 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
628 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
629 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
630 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
631 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
632 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
633 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
634 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
635 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
636 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
637 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
638 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
639 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
640 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
641 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
642 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
643 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
644 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
645#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
646 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
647#else
648 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
649#endif
650 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
651 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
652#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
653 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
654#else
655 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
656#endif
657 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
658 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
659 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
660 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
661 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
662 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
663 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
664 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
665 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
666 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
667 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
668 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
669 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
670 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
671 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
672 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
673};
674#endif /* HMVMX_USE_FUNCTION_TABLE */
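/*
 * A hedged sketch of how the table is consumed (the real dispatcher,
 * vmxHCHandleExit(), appears later in this template and may differ in detail):
 *     uint32_t const uExitReason = pVmxTransient->uExitReason;
 *     if (RT_LIKELY(uExitReason <= VMX_EXIT_MAX))
 *         return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
 *     return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
 * This is why every slot, including the undefined exit reasons, carries a
 * handler entry.
 */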
675
676#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
677static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
678{
679 /* 0 */ "(Not Used)",
680 /* 1 */ "VMCALL executed in VMX root operation.",
681 /* 2 */ "VMCLEAR with invalid physical address.",
682 /* 3 */ "VMCLEAR with VMXON pointer.",
683 /* 4 */ "VMLAUNCH with non-clear VMCS.",
684 /* 5 */ "VMRESUME with non-launched VMCS.",
685 /* 6 */ "VMRESUME after VMXOFF",
686 /* 7 */ "VM-entry with invalid control fields.",
687 /* 8 */ "VM-entry with invalid host state fields.",
688 /* 9 */ "VMPTRLD with invalid physical address.",
689 /* 10 */ "VMPTRLD with VMXON pointer.",
690 /* 11 */ "VMPTRLD with incorrect revision identifier.",
691 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
692 /* 13 */ "VMWRITE to read-only VMCS component.",
693 /* 14 */ "(Not Used)",
694 /* 15 */ "VMXON executed in VMX root operation.",
695 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
696 /* 17 */ "VM-entry with non-launched executing VMCS.",
697 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
698 /* 19 */ "VMCALL with non-clear VMCS.",
699 /* 20 */ "VMCALL with invalid VM-exit control fields.",
700 /* 21 */ "(Not Used)",
701 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
702 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
703 /* 24 */ "VMCALL with invalid SMM-monitor features.",
704 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
705 /* 26 */ "VM-entry with events blocked by MOV SS.",
706 /* 27 */ "(Not Used)",
707 /* 28 */ "Invalid operand to INVEPT/INVVPID."
708};
709#endif /* VBOX_STRICT && LOG_ENABLED */
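/*
 * A hedged usage sketch (illustrative only; uInstrError is a hypothetical local
 * read from VMX_VMCS32_RO_VM_INSTR_ERROR): the strings should only be indexed
 * after a bounds check, e.g.:
 *     if (uInstrError <= HMVMX_INSTR_ERROR_MAX)
 *         Log4(("VM-instruction error %u: %s\n", uInstrError, g_apszVmxInstrErrors[uInstrError]));
 * The table itself is only compiled into VBOX_STRICT + LOG_ENABLED builds, as
 * guarded above.
 */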
710
711
712/**
713 * Gets the CR0 guest/host mask.
714 *
715 * These bits typically do not change through the lifetime of a VM. Any bit set in
716 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
717 * by the guest.
718 *
719 * @returns The CR0 guest/host mask.
720 * @param pVCpu The cross context virtual CPU structure.
721 */
722static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
723{
724 /*
725 * Modifications by the guest to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW)
726 * and to CR0 bits that we require for shadow paging (PG) must cause VM-exits.
727 *
728 * Furthermore, modifications to any bits that are reserved/unspecified currently
729 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
730 * when future CPUs specify and use currently reserved/unspecified bits.
731 */
732 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
733 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
734 * and @bugref{6944}. */
735 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
736 return ( X86_CR0_PE
737 | X86_CR0_NE
738 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
739 | X86_CR0_PG
740 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
741}
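/*
 * A hedged usage sketch (illustrative only): the mask returned above is what the
 * CR0 export path is expected to commit to the guest/host mask field, roughly:
 *     uint64_t const fCr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
 *     int const rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);
 *     AssertRC(rc);
 * VMX_VMCS_WRITE_NW is assumed to be supplied by the including backend, like the
 * VMX_VMCS_READ_NW accessor used elsewhere in this template.
 */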
742
743
744/**
745 * Gets the CR4 guest/host mask.
746 *
747 * These bits typically do not change through the lifetime of a VM. Any bit set in
748 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
749 * by the guest.
750 *
751 * @returns The CR4 guest/host mask.
752 * @param pVCpu The cross context virtual CPU structure.
753 */
754static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
755{
756 /*
757 * We construct a mask of all CR4 bits that the guest can modify without causing
758 * a VM-exit. We then invert this mask to obtain all CR4 bits that should cause
759 * a VM-exit when the guest attempts to modify them while executing using
760 * hardware-assisted VMX.
761 *
762 * When a feature is not exposed to the guest (and may be present on the host),
763 * we want to intercept guest modifications to the bit so we can emulate proper
764 * behavior (e.g., #GP).
765 *
766 * Furthermore, only modifications to those bits that don't require immediate
767 * emulation are allowed. For example, PCIDE is excluded because the behavior
768 * depends on CR3 which might not always be the guest value while executing
769 * using hardware-assisted VMX.
770 */
771 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
772 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
773#ifdef IN_NEM_DARWIN
774 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
775#endif
776 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
777
778 /*
779 * Paranoia.
780 * Ensure features exposed to the guest are present on the host.
781 */
782 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
783#ifdef IN_NEM_DARWIN
784 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
785#endif
786 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
787
788 uint64_t const fGstMask = X86_CR4_PVI
789 | X86_CR4_TSD
790 | X86_CR4_DE
791 | X86_CR4_MCE
792 | X86_CR4_PCE
793 | X86_CR4_OSXMMEEXCPT
794 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
795#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
796 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
797 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
798#endif
799 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
800 return ~fGstMask;
801}
802
803
804/**
805 * Adds one or more exceptions to the exception bitmap and commits it to the current
806 * VMCS.
807 *
808 * @param pVCpu The cross context virtual CPU structure.
809 * @param pVmxTransient The VMX-transient structure.
810 * @param uXcptMask The exception(s) to add.
811 */
812static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
813{
814 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
815 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
816 if ((uXcptBitmap & uXcptMask) != uXcptMask)
817 {
818 uXcptBitmap |= uXcptMask;
819 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
820 AssertRC(rc);
821 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
822 }
823}
824
825
826/**
827 * Adds an exception to the exception bitmap and commits it to the current VMCS.
828 *
829 * @param pVCpu The cross context virtual CPU structure.
830 * @param pVmxTransient The VMX-transient structure.
831 * @param uXcpt The exception to add.
832 */
833static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
834{
835 Assert(uXcpt <= X86_XCPT_LAST);
836 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
837}
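/*
 * A hedged usage sketch (illustrative only): callers add a single intercept via
 * the wrapper, e.g. to trap general-protection faults:
 *     vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 * which forwards RT_BIT_32(X86_XCPT_GP) to vmxHCAddXcptInterceptMask() and only
 * rewrites VMX_VMCS32_CTRL_EXCEPTION_BITMAP when the bit was not already set.
 */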
838
839
840/**
841 * Remove one or more exceptions from the exception bitmap and commits it to the
842 * current VMCS.
843 *
844 * This takes care of not removing the exception intercept if a nested-guest
845 * requires the exception to be intercepted.
846 *
847 * @returns VBox status code.
848 * @param pVCpu The cross context virtual CPU structure.
849 * @param pVmxTransient The VMX-transient structure.
850 * @param uXcptMask The exception(s) to remove.
851 */
852static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
853{
854 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
855 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
856 if (u32XcptBitmap & uXcptMask)
857 {
858#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
859 if (!pVmxTransient->fIsNestedGuest)
860 { /* likely */ }
861 else
862 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
863#endif
864#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
865 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
866 | RT_BIT(X86_XCPT_DE)
867 | RT_BIT(X86_XCPT_NM)
868 | RT_BIT(X86_XCPT_TS)
869 | RT_BIT(X86_XCPT_UD)
870 | RT_BIT(X86_XCPT_NP)
871 | RT_BIT(X86_XCPT_SS)
872 | RT_BIT(X86_XCPT_GP)
873 | RT_BIT(X86_XCPT_PF)
874 | RT_BIT(X86_XCPT_MF));
875#elif defined(HMVMX_ALWAYS_TRAP_PF)
876 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
877#endif
878 if (uXcptMask)
879 {
880 /* Validate we are not removing any essential exception intercepts. */
881#ifndef IN_NEM_DARWIN
882 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
883#else
884 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
885#endif
886 NOREF(pVCpu);
887 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
888 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
889
890 /* Remove it from the exception bitmap. */
891 u32XcptBitmap &= ~uXcptMask;
892
893 /* Commit and update the cache if necessary. */
894 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
895 {
896 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
897 AssertRC(rc);
898 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
899 }
900 }
901 }
902 return VINF_SUCCESS;
903}
904
905
906/**
907 * Removes an exception from the exception bitmap and commits it to the current
908 * VMCS.
909 *
910 * @returns VBox status code.
911 * @param pVCpu The cross context virtual CPU structure.
912 * @param pVmxTransient The VMX-transient structure.
913 * @param uXcpt The exception to remove.
914 */
915static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
916{
917 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
918}
919
920#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
921
922/**
923 * Loads the shadow VMCS specified by the VMCS info. object.
924 *
925 * @returns VBox status code.
926 * @param pVmcsInfo The VMCS info. object.
927 *
928 * @remarks Can be called with interrupts disabled.
929 */
930static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
931{
932 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
933 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
934
935 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
936 if (RT_SUCCESS(rc))
937 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
938 return rc;
939}
940
941
942/**
943 * Clears the shadow VMCS specified by the VMCS info. object.
944 *
945 * @returns VBox status code.
946 * @param pVmcsInfo The VMCS info. object.
947 *
948 * @remarks Can be called with interrupts disabled.
949 */
950static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
951{
952 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
953 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
954
955 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
956 if (RT_SUCCESS(rc))
957 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
958 return rc;
959}
960
961
962/**
963 * Switches from and to the specified VMCSes.
964 *
965 * @returns VBox status code.
966 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
967 * @param pVmcsInfoTo The VMCS info. object we are switching to.
968 *
969 * @remarks Called with interrupts disabled.
970 */
971static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
972{
973 /*
974 * Clear the VMCS we are switching out if it has not already been cleared.
975 * This will sync any CPU internal data back to the VMCS.
976 */
977 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
978 {
979 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
980 if (RT_SUCCESS(rc))
981 {
982 /*
983 * The shadow VMCS, if any, would not be active at this point since we
984 * would have cleared it while importing the virtual hardware-virtualization
985 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
986 * clear the shadow VMCS here, just assert for safety.
987 */
988 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
989 }
990 else
991 return rc;
992 }
993
994 /*
995 * Clear the VMCS we are switching to if it has not already been cleared.
996 * This will initialize the VMCS launch state to "clear" required for loading it.
997 *
998 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
999 */
1000 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1001 {
1002 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1003 if (RT_SUCCESS(rc))
1004 { /* likely */ }
1005 else
1006 return rc;
1007 }
1008
1009 /*
1010 * Finally, load the VMCS we are switching to.
1011 */
1012 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1013}
1014
1015
1016/**
1017 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1018 * caller.
1019 *
1020 * @returns VBox status code.
1021 * @param pVCpu The cross context virtual CPU structure.
1022 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1023 * true) or guest VMCS (pass false).
1024 */
1025static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1026{
1027 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1028 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1029
1030 PVMXVMCSINFO pVmcsInfoFrom;
1031 PVMXVMCSINFO pVmcsInfoTo;
1032 if (fSwitchToNstGstVmcs)
1033 {
1034 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1035 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1036 }
1037 else
1038 {
1039 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1040 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1041 }
1042
1043 /*
1044 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1045 * preemption hook code path acquires the current VMCS.
1046 */
1047 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1048
1049 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1050 if (RT_SUCCESS(rc))
1051 {
1052 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1053 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1054
1055 /*
1056 * If we are switching to a VMCS that was executed on a different host CPU or was
1057 * never executed before, flag that we need to export the host state before executing
1058 * guest/nested-guest code using hardware-assisted VMX.
1059 *
1060 * This could probably be done in a preemptible context since the preemption hook
1061 * will flag the necessary change in host context. However, since preemption is
1062 * already disabled and to avoid making assumptions about host specific code in
1063 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1064 * disabled.
1065 */
1066 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1067 { /* likely */ }
1068 else
1069 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1070
1071 ASMSetFlags(fEFlags);
1072
1073 /*
1074 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1075 * flag that we need to update the host MSR values there. Even if we decide in the
1076 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1077 * if its content differs, we would have to update the host MSRs anyway.
1078 */
1079 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1080 }
1081 else
1082 ASMSetFlags(fEFlags);
1083 return rc;
1084}
1085
1086#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1087#ifdef VBOX_STRICT
1088
1089/**
1090 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1091 * transient structure.
1092 *
1093 * @param pVCpu The cross context virtual CPU structure.
1094 * @param pVmxTransient The VMX-transient structure.
1095 */
1096DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1097{
1098 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1099 AssertRC(rc);
1100}
1101
1102
1103/**
1104 * Reads the VM-entry exception error code field from the VMCS into
1105 * the VMX transient structure.
1106 *
1107 * @param pVCpu The cross context virtual CPU structure.
1108 * @param pVmxTransient The VMX-transient structure.
1109 */
1110DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1111{
1112 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1113 AssertRC(rc);
1114}
1115
1116
1117/**
1118 * Reads the VM-entry instruction-length field from the VMCS into
1119 * the VMX transient structure.
1120 *
1121 * @param pVCpu The cross context virtual CPU structure.
1122 * @param pVmxTransient The VMX-transient structure.
1123 */
1124DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1125{
1126 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1127 AssertRC(rc);
1128}
1129
1130#endif /* VBOX_STRICT */
1131
1132
1133/**
1134 * Reads VMCS fields into the VMXTRANSIENT structure, slow path version.
1135 *
1136 * Don't call directly unless it's likely that some or all of the fields
1137 * given in @a a_fReadMask have already been read.
1138 *
1139 * @tparam a_fReadMask The fields to read.
1140 * @param pVCpu The cross context virtual CPU structure.
1141 * @param pVmxTransient The VMX-transient structure.
1142 */
1143template<uint32_t const a_fReadMask>
1144static void vmxHCReadToTransientSlow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1145{
1146 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1147 | HMVMX_READ_EXIT_INSTR_LEN
1148 | HMVMX_READ_EXIT_INSTR_INFO
1149 | HMVMX_READ_IDT_VECTORING_INFO
1150 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1151 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1152 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1153 | HMVMX_READ_GUEST_LINEAR_ADDR
1154 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1155 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1156 )) == 0);
1157
1158 if ((pVmxTransient->fVmcsFieldsRead & a_fReadMask) != a_fReadMask)
1159 {
1160 uint32_t const fVmcsFieldsRead = pVmxTransient->fVmcsFieldsRead;
1161
1162 if ( (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1163 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1164 {
1165 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1166 AssertRC(rc);
1167 }
1168 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1169 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1170 {
1171 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1172 AssertRC(rc);
1173 }
1174 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1175 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1176 {
1177 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1178 AssertRC(rc);
1179 }
1180 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1181 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1182 {
1183 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1184 AssertRC(rc);
1185 }
1186 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1187 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1188 {
1189 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1190 AssertRC(rc);
1191 }
1192 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1193 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1194 {
1195 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1196 AssertRC(rc);
1197 }
1198 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1199 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1200 {
1201 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1202 AssertRC(rc);
1203 }
1204 if ( (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1205 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1206 {
1207 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1208 AssertRC(rc);
1209 }
1210 if ( (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1211 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1212 {
1213 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1214 AssertRC(rc);
1215 }
1216 if ( (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1217 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1218 {
1219 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1220 AssertRC(rc);
1221 }
1222
1223 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1224 }
1225}
1226
1227
1228/**
1229 * Reads VMCS fields into the VMXTRANSIENT structure.
1230 *
1231 * This optimizes for the case where none of @a a_fReadMask has been read yet,
1232 * generating an optimized read sequence w/o any conditionals in
1233 * non-strict builds.
1234 *
1235 * @tparam a_fReadMask The fields to read. One or more of the
1236 * HMVMX_READ_XXX fields ORed together.
1237 * @param pVCpu The cross context virtual CPU structure.
1238 * @param pVmxTransient The VMX-transient structure.
1239 */
1240template<uint32_t const a_fReadMask>
1241DECLINLINE(void) vmxHCReadToTransient(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1242{
1243 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1244 | HMVMX_READ_EXIT_INSTR_LEN
1245 | HMVMX_READ_EXIT_INSTR_INFO
1246 | HMVMX_READ_IDT_VECTORING_INFO
1247 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1248 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1249 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1250 | HMVMX_READ_GUEST_LINEAR_ADDR
1251 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1252 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1253 )) == 0);
1254
1255 if (RT_LIKELY(!(pVmxTransient->fVmcsFieldsRead & a_fReadMask)))
1256 {
1257 if (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1258 {
1259 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1260 AssertRC(rc);
1261 }
1262 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1263 {
1264 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1265 AssertRC(rc);
1266 }
1267 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1268 {
1269 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1270 AssertRC(rc);
1271 }
1272 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1273 {
1274 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1275 AssertRC(rc);
1276 }
1277 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1278 {
1279 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1280 AssertRC(rc);
1281 }
1282 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1283 {
1284 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1285 AssertRC(rc);
1286 }
1287 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1288 {
1289 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1290 AssertRC(rc);
1291 }
1292 if (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1293 {
1294 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1295 AssertRC(rc);
1296 }
1297 if (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1298 {
1299 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1300 AssertRC(rc);
1301 }
1302 if (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1303 {
1304 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1305 AssertRC(rc);
1306 }
1307
1308 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1309 }
1310 else
1311 {
1312 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatReadToTransientFallback);
1313 Log11Func(("a_fReadMask=%#x fVmcsFieldsRead=%#x => %#x - Taking inefficient code path!\n",
1314 a_fReadMask, pVmxTransient->fVmcsFieldsRead, a_fReadMask & pVmxTransient->fVmcsFieldsRead));
1315 vmxHCReadToTransientSlow<a_fReadMask>(pVCpu, pVmxTransient);
1316 }
1317}
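/*
 * A hedged usage sketch (illustrative only): exit handlers instantiate the
 * template with the fields they need, e.g.:
 *     vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
 *                          | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
 * On the common path (nothing read yet) this compiles to straight-line VMREADs;
 * otherwise it falls back to vmxHCReadToTransientSlow() and bumps the
 * StatReadToTransientFallback counter.
 */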
1318
1319
1320#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1321/**
1322 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1323 *
1324 * @param pVCpu The cross context virtual CPU structure.
1325 * @param pVmxTransient The VMX-transient structure.
1326 */
1327static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1328{
1329 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1330 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1331 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1332 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1333 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1334 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1335 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1336 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1337 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1338 AssertRC(rc);
1339 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1340 | HMVMX_READ_EXIT_INSTR_LEN
1341 | HMVMX_READ_EXIT_INSTR_INFO
1342 | HMVMX_READ_IDT_VECTORING_INFO
1343 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1344 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1345 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1346 | HMVMX_READ_GUEST_LINEAR_ADDR
1347 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1348}
1349#endif
1350
1351/**
1352 * Verifies that our cached values of the VMCS fields are all consistent with
1353 * what's actually present in the VMCS.
1354 *
1355 * @returns VBox status code.
1356 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1357 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1358 * VMCS content. HMCPU error-field is
1359 * updated, see VMX_VCI_XXX.
1360 * @param pVCpu The cross context virtual CPU structure.
1361 * @param pVmcsInfo The VMCS info. object.
1362 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1363 */
1364static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1365{
1366 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1367
1368 uint32_t u32Val;
1369 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1370 AssertRC(rc);
1371 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1372 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1373 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1374 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1375
1376 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1377 AssertRC(rc);
1378 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1379 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1380 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1381 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1382
1383 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1384 AssertRC(rc);
1385 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1386 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1387 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1388 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1389
1390 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1391 AssertRC(rc);
1392 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1393 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1394 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1395 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1396
1397 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1398 {
1399 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1400 AssertRC(rc);
1401 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1402 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1403 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1404 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1405 }
1406
1407 uint64_t u64Val;
1408 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1409 {
1410 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1411 AssertRC(rc);
1412 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1413 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1414 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1415 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1416 }
1417
1418 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1419 AssertRC(rc);
1420 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1421 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1422 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1423 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1424
1425 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1426 AssertRC(rc);
1427 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1428 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1429 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1430 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1431
1432 NOREF(pcszVmcs);
1433 return VINF_SUCCESS;
1434}
1435
1436
1437/**
1438 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1439 * VMCS.
1440 *
1441 * This is typically required when the guest changes paging mode.
1442 *
1443 * @returns VBox status code.
1444 * @param pVCpu The cross context virtual CPU structure.
1445 * @param pVmxTransient The VMX-transient structure.
1446 *
1447 * @remarks Requires EFER.
1448 * @remarks No-long-jump zone!!!
1449 */
1450static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1451{
1452 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1453 {
1454 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1455 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1456
1457 /*
1458 * VM-entry controls.
1459 */
1460 {
1461 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1462 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
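            /* Note / illustration (made-up values, not from real hardware): fVal starts out as allowed0,
               so all must-be-one bits are already set. If allowed1 were e.g. 0x000fffff, any control bit
               outside that mask must stay 0, and the "(fVal & fZap) == fVal" check further down rejects
               attempts to set a control bit the CPU does not support. */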
1463
1464 /*
1465 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1466 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1467 *
1468 * For nested-guests, this is a mandatory VM-entry control. It's also
1469 * required because we do not want to leak host bits to the nested-guest.
1470 */
1471 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1472
1473 /*
1474 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1475 *
1476             * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1477             * required to get the nested-guest working with hardware-assisted VMX execution.
1478             * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1479             * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1480             * here rather than while merging the guest VMCS controls.
1481 */
1482 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1483 {
1484 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1485 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1486 }
1487 else
1488 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1489
1490 /*
1491 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1492 *
1493             * For nested-guests, we use the "load IA32_EFER" control if the hardware supports it,
1494 * regardless of whether the nested-guest VMCS specifies it because we are free to
1495 * load whatever MSRs we require and we do not need to modify the guest visible copy
1496 * of the VM-entry MSR load area.
1497 */
1498 if ( g_fHmVmxSupportsVmcsEfer
1499#ifndef IN_NEM_DARWIN
1500 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1501#endif
1502 )
1503 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1504 else
1505 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1506
1507 /*
1508 * The following should -not- be set (since we're not in SMM mode):
1509 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1510 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1511 */
1512
1513 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1514 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1515
1516 if ((fVal & fZap) == fVal)
1517 { /* likely */ }
1518 else
1519 {
1520 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1521 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1522 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1523 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1524 }
1525
1526 /* Commit it to the VMCS. */
1527 if (pVmcsInfo->u32EntryCtls != fVal)
1528 {
1529 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1530 AssertRC(rc);
1531 pVmcsInfo->u32EntryCtls = fVal;
1532 }
1533 }
1534
1535 /*
1536 * VM-exit controls.
1537 */
1538 {
1539 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1540 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1541
1542 /*
1543 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1544 * supported the 1-setting of this bit.
1545 *
1546 * For nested-guests, we set the "save debug controls" as the converse
1547 * "load debug controls" is mandatory for nested-guests anyway.
1548 */
1549 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1550
1551 /*
1552 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1553 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1554 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1555 * vmxHCExportHostMsrs().
1556 *
1557 * For nested-guests, we always set this bit as we do not support 32-bit
1558 * hosts.
1559 */
1560 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1561
1562#ifndef IN_NEM_DARWIN
1563 /*
1564 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1565 *
1566 * For nested-guests, we should use the "save IA32_EFER" control if we also
1567 * used the "load IA32_EFER" control while exporting VM-entry controls.
1568 */
1569 if ( g_fHmVmxSupportsVmcsEfer
1570 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1571 {
1572 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1573 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1574 }
1575#endif
1576
1577 /*
1578 * Enable saving of the VMX-preemption timer value on VM-exit.
1579 * For nested-guests, currently not exposed/used.
1580 */
1581 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1582 * the timer value. */
1583 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1584 {
1585 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1586 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1587 }
1588
1589 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1590 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1591
1592 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1593 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1594 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1595
1596 if ((fVal & fZap) == fVal)
1597 { /* likely */ }
1598 else
1599 {
1600 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1601 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1602 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1603 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1604 }
1605
1606 /* Commit it to the VMCS. */
1607 if (pVmcsInfo->u32ExitCtls != fVal)
1608 {
1609 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1610 AssertRC(rc);
1611 pVmcsInfo->u32ExitCtls = fVal;
1612 }
1613 }
1614
1615 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1616 }
1617 return VINF_SUCCESS;
1618}
1619
1620
1621/**
1622 * Sets the TPR threshold in the VMCS.
1623 *
1624 * @param pVCpu The cross context virtual CPU structure.
1625 * @param pVmcsInfo The VMCS info. object.
1626 * @param u32TprThreshold The TPR threshold (task-priority class only).
1627 */
1628DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1629{
1630 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1631 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1632 RT_NOREF(pVmcsInfo);
1633 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1634 AssertRC(rc);
1635}
1636
1637
1638/**
1639 * Exports the guest APIC TPR state into the VMCS.
1640 *
1641 * @param pVCpu The cross context virtual CPU structure.
1642 * @param pVmxTransient The VMX-transient structure.
1643 *
1644 * @remarks No-long-jump zone!!!
1645 */
1646static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1647{
1648 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1649 {
1650 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1651
1652 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1653 if (!pVmxTransient->fIsNestedGuest)
1654 {
1655 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1656 && APICIsEnabled(pVCpu))
1657 {
1658 /*
1659 * Setup TPR shadowing.
1660 */
1661 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1662 {
1663 bool fPendingIntr = false;
1664 uint8_t u8Tpr = 0;
1665 uint8_t u8PendingIntr = 0;
1666 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1667 AssertRC(rc);
1668
1669 /*
1670 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1671 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1672 * priority of the pending interrupt so we can deliver the interrupt. If there
1673 * are no interrupts pending, set threshold to 0 to not cause any
1674 * TPR-below-threshold VM-exits.
1675 */
1676 uint32_t u32TprThreshold = 0;
1677 if (fPendingIntr)
1678 {
1679 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1680 (which is the Task-Priority Class). */
1681 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1682 const uint8_t u8TprPriority = u8Tpr >> 4;
1683 if (u8PendingPriority <= u8TprPriority)
1684 u32TprThreshold = u8PendingPriority;
1685 }
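                    /* Worked example (hypothetical values): guest TPR = 0x30 (priority class 3) and a
                       pending vector of 0x29 (class 2): 2 <= 3, so the threshold becomes 2 and the guest
                       lowering its TPR below class 2 causes a TPR-below-threshold VM-exit, letting us
                       deliver the interrupt. A pending vector of 0x51 (class 5) is not masked by that
                       TPR, so the threshold stays 0. */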
1686
1687 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1688 }
1689 }
1690 }
1691 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1692 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1693 }
1694}
1695
1696
1697/**
1698 * Gets the guest interruptibility-state and updates related force-flags.
1699 *
1700 * @returns Guest's interruptibility-state.
1701 * @param pVCpu The cross context virtual CPU structure.
1702 *
1703 * @remarks No-long-jump zone!!!
1704 */
1705static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1706{
1707 uint32_t fIntrState;
1708
1709 /*
1710 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1711 */
1712 if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
1713 fIntrState = 0;
1714 else
1715 {
1716 /* If inhibition is active, RIP should've been imported from the VMCS already. */
1717 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1718
1719 if (CPUMIsInInterruptShadowAfterSs(&pVCpu->cpum.GstCtx))
1720 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1721 else
1722 {
1723 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1724
1725 /* Block-by-STI must not be set when interrupts are disabled. */
1726 AssertStmt(pVCpu->cpum.GstCtx.eflags.Bits.u1IF, fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
1727 }
1728 }
1729
1730 /*
1731 * Check if we should inhibit NMI delivery.
1732 */
1733 if (!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1734 { /* likely */ }
1735 else
1736 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1737
1738 /*
1739 * Validate.
1740 */
1741 /* We don't support block-by-SMI yet.*/
1742 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1743
1744 return fIntrState;
1745}
1746
1747
1748/**
1749 * Exports the exception intercepts required for guest execution in the VMCS.
1750 *
1751 * @param pVCpu The cross context virtual CPU structure.
1752 * @param pVmxTransient The VMX-transient structure.
1753 *
1754 * @remarks No-long-jump zone!!!
1755 */
1756static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1757{
1758 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1759 {
1760 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1761 if ( !pVmxTransient->fIsNestedGuest
1762 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1763 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1764 else
1765 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1766
1767 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1768 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1769 }
1770}
1771
1772
1773/**
1774 * Exports the guest's RIP into the guest-state area in the VMCS.
1775 *
1776 * @param pVCpu The cross context virtual CPU structure.
1777 *
1778 * @remarks No-long-jump zone!!!
1779 */
1780static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1781{
1782 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1783 {
1784 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1785
1786 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1787 AssertRC(rc);
1788
1789 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1790 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1791 }
1792}
1793
1794
1795/**
1796 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1797 *
1798 * @param pVCpu The cross context virtual CPU structure.
1799 * @param pVmxTransient The VMX-transient structure.
1800 *
1801 * @remarks No-long-jump zone!!!
1802 */
1803static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1804{
1805 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1806 {
1807 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1808
1809 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits
1810 of RFLAGS are reserved (MBZ). We use bits 63:24 for internal purposes, so no need
1811 to assert this, the CPUMX86EFLAGS/CPUMX86RFLAGS union masks these off for us.
1812 Use 32-bit VMWRITE. */
1813 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
1814 Assert((fEFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
1815 AssertMsg(!(fEFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK)), ("%#x\n", fEFlags));
1816
1817#ifndef IN_NEM_DARWIN
1818 /*
1819 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1820 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1821 * can run the real-mode guest code under Virtual 8086 mode.
1822 */
1823 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1824 if (pVmcsInfo->RealMode.fRealOnV86Active)
1825 {
1826 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1827 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1828 Assert(!pVmxTransient->fIsNestedGuest);
1829 pVmcsInfo->RealMode.Eflags.u32 = fEFlags; /* Save the original eflags of the real-mode guest. */
1830 fEFlags |= X86_EFL_VM; /* Set the Virtual 8086 mode bit. */
1831 fEFlags &= ~X86_EFL_IOPL; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1832 }
1833#else
1834 RT_NOREF(pVmxTransient);
1835#endif
1836
1837 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags);
1838 AssertRC(rc);
1839
1840 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1841 Log4Func(("eflags=%#RX32\n", fEFlags));
1842 }
1843}
1844
1845
1846#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1847/**
1848 * Copies the nested-guest VMCS to the shadow VMCS.
1849 *
1850 * @returns VBox status code.
1851 * @param pVCpu The cross context virtual CPU structure.
1852 * @param pVmcsInfo The VMCS info. object.
1853 *
1854 * @remarks No-long-jump zone!!!
1855 */
1856static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1857{
1858 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1859 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1860
1861 /*
1862 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1863 * current VMCS, as we may try saving guest lazy MSRs.
1864 *
1865     * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1866     * calling the VMCS import code, which currently performs the guest MSR reads
1867     * (on 64-bit hosts), accesses the auto-load/store MSR area (on 32-bit hosts) and
1868     * runs the rest of the VMX leave-session machinery.
1869 */
1870 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1871
1872 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1873 if (RT_SUCCESS(rc))
1874 {
1875 /*
1876 * Copy all guest read/write VMCS fields.
1877 *
1878 * We don't check for VMWRITE failures here for performance reasons and
1879 * because they are not expected to fail, barring irrecoverable conditions
1880 * like hardware errors.
1881 */
1882 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1883 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1884 {
1885 uint64_t u64Val;
1886 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1887 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1888 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1889 }
1890
1891 /*
1892 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1893 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1894 */
1895 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1896 {
1897 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1898 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1899 {
1900 uint64_t u64Val;
1901 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1902 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1903 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1904 }
1905 }
1906
1907 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1908 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1909 }
1910
1911 ASMSetFlags(fEFlags);
1912 return rc;
1913}
1914
1915
1916/**
1917 * Copies the shadow VMCS to the nested-guest VMCS.
1918 *
1919 * @returns VBox status code.
1920 * @param pVCpu The cross context virtual CPU structure.
1921 * @param pVmcsInfo The VMCS info. object.
1922 *
1923 * @remarks Called with interrupts disabled.
1924 */
1925static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1926{
1927 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1928 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1929 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1930
1931 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1932 if (RT_SUCCESS(rc))
1933 {
1934 /*
1935 * Copy guest read/write fields from the shadow VMCS.
1936 * Guest read-only fields cannot be modified, so no need to copy them.
1937 *
1938 * We don't check for VMREAD failures here for performance reasons and
1939 * because they are not expected to fail, barring irrecoverable conditions
1940 * like hardware errors.
1941 */
1942 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1943 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1944 {
1945 uint64_t u64Val;
1946 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1947 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1948 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1949 }
1950
1951 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1952 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1953 }
1954 return rc;
1955}
1956
1957
1958/**
1959 * Enables VMCS shadowing for the given VMCS info. object.
1960 *
1961 * @param pVCpu The cross context virtual CPU structure.
1962 * @param pVmcsInfo The VMCS info. object.
1963 *
1964 * @remarks No-long-jump zone!!!
1965 */
1966static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1967{
1968 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1969 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1970 {
1971 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1972 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1973 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1974 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1975 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1976 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
1977 Log4Func(("Enabled\n"));
1978 }
1979}
1980
1981
1982/**
1983 * Disables VMCS shadowing for the given VMCS info. object.
1984 *
1985 * @param pVCpu The cross context virtual CPU structure.
1986 * @param pVmcsInfo The VMCS info. object.
1987 *
1988 * @remarks No-long-jump zone!!!
1989 */
1990static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1991{
1992 /*
1993 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
1994 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
1995 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
1996 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
1997 *
1998 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
1999 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2000 */
2001 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2002 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2003 {
2004 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2005 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2006 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2007 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2008 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2009 Log4Func(("Disabled\n"));
2010 }
2011}
2012#endif
2013
2014
2015/**
2016 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2017 *
2018 * The guest FPU state is always pre-loaded, hence we don't need to bother with
2019 * sharing FPU-related CR0 bits between the guest and host.
2020 *
2021 * @returns VBox status code.
2022 * @param pVCpu The cross context virtual CPU structure.
2023 * @param pVmxTransient The VMX-transient structure.
2024 *
2025 * @remarks No-long-jump zone!!!
2026 */
2027static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2028{
2029 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2030 {
2031 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2032 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2033
2034 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2035 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2036 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2037 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2038 else
2039 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
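        /* For reference (typical but not architecturally guaranteed values): MSR_IA32_VMX_CR0_FIXED0 is
           often 0x80000021 (PG, NE and PE must be 1) and MSR_IA32_VMX_CR0_FIXED1 is often 0xffffffff
           (no bit is forced to 0), which is why only PE and PG need relaxing for unrestricted guests. */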
2040
2041 if (!pVmxTransient->fIsNestedGuest)
2042 {
2043 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2044 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2045 uint64_t const u64ShadowCr0 = u64GuestCr0;
2046 Assert(!RT_HI_U32(u64GuestCr0));
2047
2048 /*
2049 * Setup VT-x's view of the guest CR0.
2050 */
2051 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2052 if (VM_IS_VMX_NESTED_PAGING(pVM))
2053 {
2054#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2055 if (CPUMIsGuestPagingEnabled(pVCpu))
2056 {
2057 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2058 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2059 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2060 }
2061 else
2062 {
2063 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2064 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2065 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2066 }
2067
2068 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2069 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2070 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2071#endif
2072 }
2073 else
2074 {
2075 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2076 u64GuestCr0 |= X86_CR0_WP;
2077 }
2078
2079 /*
2080 * Guest FPU bits.
2081 *
2082 * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state
2083 * using CR0.TS.
2084 *
2085             * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2086             * set on the first CPUs to support VT-x, with no mention of this requirement with regards to UX in the VM-entry checks.
2087 */
2088 u64GuestCr0 |= X86_CR0_NE;
2089
2090 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2091 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2092
2093 /*
2094 * Update exception intercepts.
2095 */
2096 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2097#ifndef IN_NEM_DARWIN
2098 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2099 {
2100 Assert(PDMVmmDevHeapIsEnabled(pVM));
2101 Assert(pVM->hm.s.vmx.pRealModeTSS);
2102 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2103 }
2104 else
2105#endif
2106 {
2107 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2108 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2109 if (fInterceptMF)
2110 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2111 }
2112
2113 /* Additional intercepts for debugging, define these yourself explicitly. */
2114#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2115 uXcptBitmap |= 0
2116 | RT_BIT(X86_XCPT_BP)
2117 | RT_BIT(X86_XCPT_DE)
2118 | RT_BIT(X86_XCPT_NM)
2119 | RT_BIT(X86_XCPT_TS)
2120 | RT_BIT(X86_XCPT_UD)
2121 | RT_BIT(X86_XCPT_NP)
2122 | RT_BIT(X86_XCPT_SS)
2123 | RT_BIT(X86_XCPT_GP)
2124 | RT_BIT(X86_XCPT_PF)
2125 | RT_BIT(X86_XCPT_MF)
2126 ;
2127#elif defined(HMVMX_ALWAYS_TRAP_PF)
2128 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2129#endif
2130 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2131 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2132 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2133 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2134 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2135
2136 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2137 u64GuestCr0 |= fSetCr0;
2138 u64GuestCr0 &= fZapCr0;
2139 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2140
2141 /* Commit the CR0 and related fields to the guest VMCS. */
2142 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2143 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2144 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2145 {
2146 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2147 AssertRC(rc);
2148 }
2149 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2150 {
2151 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2152 AssertRC(rc);
2153 }
2154
2155 /* Update our caches. */
2156 pVmcsInfo->u32ProcCtls = uProcCtls;
2157 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2158
2159 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2160 }
2161 else
2162 {
2163 /*
2164 * With nested-guests, we may have extended the guest/host mask here since we
2165 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2166 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2167 * originally supplied. We must copy those bits from the nested-guest CR0 into
2168 * the nested-guest CR0 read-shadow.
2169 */
2170 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2171 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2172 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2173 Assert(!RT_HI_U32(u64GuestCr0));
2174 Assert(u64GuestCr0 & X86_CR0_NE);
2175
2176 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2177 u64GuestCr0 |= fSetCr0;
2178 u64GuestCr0 &= fZapCr0;
2179 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2180
2181 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2182 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2183 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2184
2185 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2186 }
2187
2188 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2189 }
2190
2191 return VINF_SUCCESS;
2192}
2193
2194
2195/**
2196 * Exports the guest control registers (CR3, CR4) into the guest-state area
2197 * in the VMCS.
2198 *
2199 * @returns VBox strict status code.
2200 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2201 * without unrestricted guest access and the VMMDev is not presently
2202 * mapped (e.g. EFI32).
2203 *
2204 * @param pVCpu The cross context virtual CPU structure.
2205 * @param pVmxTransient The VMX-transient structure.
2206 *
2207 * @remarks No-long-jump zone!!!
2208 */
2209static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2210{
2211 int rc = VINF_SUCCESS;
2212 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2213
2214 /*
2215 * Guest CR2.
2216 * It's always loaded in the assembler code. Nothing to do here.
2217 */
2218
2219 /*
2220 * Guest CR3.
2221 */
2222 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2223 {
2224 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2225
2226 if (VM_IS_VMX_NESTED_PAGING(pVM))
2227 {
2228#ifndef IN_NEM_DARWIN
2229 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2230 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2231
2232 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2233 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2234 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2235 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2236
2237 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2238 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2239 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
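            /* Illustration: the WB memory type (6) occupies bits 2:0 and the page-walk length is encoded
               as length - 1 = 3 in bits 5:3, so the low byte of the EPTP typically ends up as 0x1e here
               (the accessed/dirty bit 6 is not set in this path), matching the assertions below. */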
2240
2241 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2242 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2243 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2244 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2245 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2246 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2247 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2248
2249 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2250 AssertRC(rc);
2251#endif
2252
2253 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2254 uint64_t u64GuestCr3 = pCtx->cr3;
2255 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2256 || CPUMIsGuestPagingEnabledEx(pCtx))
2257 {
2258 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2259 if (CPUMIsGuestInPAEModeEx(pCtx))
2260 {
2261 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2262 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2263 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2264 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2265 }
2266
2267 /*
2268                 * With nested paging, the guest's view of its CR3 is left unblemished when the
2269                 * guest is using paging, or when we have unrestricted guest execution to handle
2270                 * the guest while it's not using paging.
2271 */
2272 }
2273#ifndef IN_NEM_DARWIN
2274 else
2275 {
2276 /*
2277 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2278 * thinks it accesses physical memory directly, we use our identity-mapped
2279 * page table to map guest-linear to guest-physical addresses. EPT takes care
2280 * of translating it to host-physical addresses.
2281 */
2282 RTGCPHYS GCPhys;
2283 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2284
2285 /* We obtain it here every time as the guest could have relocated this PCI region. */
2286 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2287 if (RT_SUCCESS(rc))
2288 { /* likely */ }
2289 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2290 {
2291 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2292 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2293 }
2294 else
2295 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2296
2297 u64GuestCr3 = GCPhys;
2298 }
2299#endif
2300
2301 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2302 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2303 AssertRC(rc);
2304 }
2305 else
2306 {
2307 Assert(!pVmxTransient->fIsNestedGuest);
2308 /* Non-nested paging case, just use the hypervisor's CR3. */
2309 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2310
2311 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2312 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2313 AssertRC(rc);
2314 }
2315
2316 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2317 }
2318
2319 /*
2320 * Guest CR4.
2321      * ASSUMES this is done every time we get in from ring-3! (XCR0)
2322 */
2323 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2324 {
2325 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2326 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2327
2328 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2329 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2330
2331 /*
2332 * With nested-guests, we may have extended the guest/host mask here (since we
2333 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
2334 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2335 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2336 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2337 */
2338 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2339 uint64_t u64GuestCr4 = pCtx->cr4;
2340 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2341 ? pCtx->cr4
2342 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2343 Assert(!RT_HI_U32(u64GuestCr4));
2344
2345#ifndef IN_NEM_DARWIN
2346 /*
2347 * Setup VT-x's view of the guest CR4.
2348 *
2349 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2350 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2351 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2352 *
2353 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2354 */
2355 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2356 {
2357 Assert(pVM->hm.s.vmx.pRealModeTSS);
2358 Assert(PDMVmmDevHeapIsEnabled(pVM));
2359 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2360 }
2361#endif
2362
2363 if (VM_IS_VMX_NESTED_PAGING(pVM))
2364 {
2365 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2366 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2367 {
2368 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2369 u64GuestCr4 |= X86_CR4_PSE;
2370 /* Our identity mapping is a 32-bit page directory. */
2371 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2372 }
2373 /* else use guest CR4.*/
2374 }
2375 else
2376 {
2377 Assert(!pVmxTransient->fIsNestedGuest);
2378
2379 /*
2380 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2381 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2382 */
2383 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2384 {
2385 case PGMMODE_REAL: /* Real-mode. */
2386 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2387 case PGMMODE_32_BIT: /* 32-bit paging. */
2388 {
2389 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2390 break;
2391 }
2392
2393 case PGMMODE_PAE: /* PAE paging. */
2394 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2395 {
2396 u64GuestCr4 |= X86_CR4_PAE;
2397 break;
2398 }
2399
2400 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2401 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2402 {
2403#ifdef VBOX_WITH_64_BITS_GUESTS
2404 /* For our assumption in vmxHCShouldSwapEferMsr. */
2405 Assert(u64GuestCr4 & X86_CR4_PAE);
2406 break;
2407#endif
2408 }
2409 default:
2410 AssertFailed();
2411 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2412 }
2413 }
2414
2415 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2416 u64GuestCr4 |= fSetCr4;
2417 u64GuestCr4 &= fZapCr4;
2418
2419 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2420 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2421 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2422
2423#ifndef IN_NEM_DARWIN
2424 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2425 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2426 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2427 {
2428 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2429 hmR0VmxUpdateStartVmFunction(pVCpu);
2430 }
2431#endif
2432
2433 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2434
2435 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2436 }
2437 return rc;
2438}
2439
2440
2441#ifdef VBOX_STRICT
2442/**
2443 * Strict function to validate segment registers.
2444 *
2445 * @param pVCpu The cross context virtual CPU structure.
2446 * @param pVmcsInfo The VMCS info. object.
2447 *
2448 * @remarks Will import guest CR0 on strict builds during validation of
2449 * segments.
2450 */
2451static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2452{
2453 /*
2454 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2455 *
2456     * The reason we check for an attribute value of 0 in this function, and not just the
2457     * unusable bit, is that vmxHCExportGuestSegReg() only updates the VMCS' copy of the value
2458     * with the unusable bit and doesn't change the guest-context value.
2459 */
2460 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2461 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2462 vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2463 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2464 && ( !CPUMIsGuestInRealModeEx(pCtx)
2465 && !CPUMIsGuestInV86ModeEx(pCtx)))
2466 {
2467 /* Protected mode checks */
2468 /* CS */
2469 Assert(pCtx->cs.Attr.n.u1Present);
2470 Assert(!(pCtx->cs.Attr.u & 0xf00));
2471 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2472 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2473 || !(pCtx->cs.Attr.n.u1Granularity));
2474 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2475 || (pCtx->cs.Attr.n.u1Granularity));
2476 /* CS cannot be loaded with NULL in protected mode. */
2477 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2478 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2479 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2480 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2481 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2482 else
2483             AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2484 /* SS */
2485 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2486 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2487 if ( !(pCtx->cr0 & X86_CR0_PE)
2488 || pCtx->cs.Attr.n.u4Type == 3)
2489 {
2490 Assert(!pCtx->ss.Attr.n.u2Dpl);
2491 }
2492 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2493 {
2494 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2495 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2496 Assert(pCtx->ss.Attr.n.u1Present);
2497 Assert(!(pCtx->ss.Attr.u & 0xf00));
2498 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2499 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2500 || !(pCtx->ss.Attr.n.u1Granularity));
2501 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2502 || (pCtx->ss.Attr.n.u1Granularity));
2503 }
2504 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2505 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2506 {
2507 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2508 Assert(pCtx->ds.Attr.n.u1Present);
2509 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2510 Assert(!(pCtx->ds.Attr.u & 0xf00));
2511 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2512 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2513 || !(pCtx->ds.Attr.n.u1Granularity));
2514 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2515 || (pCtx->ds.Attr.n.u1Granularity));
2516 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2517 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2518 }
2519 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2520 {
2521 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2522 Assert(pCtx->es.Attr.n.u1Present);
2523 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2524 Assert(!(pCtx->es.Attr.u & 0xf00));
2525 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2526 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2527 || !(pCtx->es.Attr.n.u1Granularity));
2528 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2529 || (pCtx->es.Attr.n.u1Granularity));
2530 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2531 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2532 }
2533 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2534 {
2535 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2536 Assert(pCtx->fs.Attr.n.u1Present);
2537 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2538 Assert(!(pCtx->fs.Attr.u & 0xf00));
2539 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2540 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2541 || !(pCtx->fs.Attr.n.u1Granularity));
2542 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2543 || (pCtx->fs.Attr.n.u1Granularity));
2544 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2545 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2546 }
2547 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2548 {
2549 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2550 Assert(pCtx->gs.Attr.n.u1Present);
2551 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2552 Assert(!(pCtx->gs.Attr.u & 0xf00));
2553 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2554 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2555 || !(pCtx->gs.Attr.n.u1Granularity));
2556 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2557 || (pCtx->gs.Attr.n.u1Granularity));
2558 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2559 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2560 }
2561 /* 64-bit capable CPUs. */
2562 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2563 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2564 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2565 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2566 }
2567 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2568 || ( CPUMIsGuestInRealModeEx(pCtx)
2569 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2570 {
2571 /* Real and v86 mode checks. */
2572         /* vmxHCExportGuestSegReg() writes the modified value into the VMCS. We want what we're feeding to VT-x. */
2573 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2574#ifndef IN_NEM_DARWIN
2575 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2576 {
2577 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2578 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2579 }
2580 else
2581#endif
2582 {
2583 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2584 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2585 }
2586
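        /* In real and v86 mode each segment base must equal the selector shifted left by 4; e.g. an
           (illustrative) CS selector of 0x1234 gives a base of 0x12340, together with the fixed 0xffff
           limit and 0xf3 access rights asserted below. */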
2587 /* CS */
2588         AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#RX64 %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2589 Assert(pCtx->cs.u32Limit == 0xffff);
2590 Assert(u32CSAttr == 0xf3);
2591 /* SS */
2592 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2593 Assert(pCtx->ss.u32Limit == 0xffff);
2594 Assert(u32SSAttr == 0xf3);
2595 /* DS */
2596 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2597 Assert(pCtx->ds.u32Limit == 0xffff);
2598 Assert(u32DSAttr == 0xf3);
2599 /* ES */
2600 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2601 Assert(pCtx->es.u32Limit == 0xffff);
2602 Assert(u32ESAttr == 0xf3);
2603 /* FS */
2604 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2605 Assert(pCtx->fs.u32Limit == 0xffff);
2606 Assert(u32FSAttr == 0xf3);
2607 /* GS */
2608 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2609 Assert(pCtx->gs.u32Limit == 0xffff);
2610 Assert(u32GSAttr == 0xf3);
2611 /* 64-bit capable CPUs. */
2612 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2613 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2614 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2615 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2616 }
2617}
2618#endif /* VBOX_STRICT */
2619
2620
2621/**
2622 * Exports a guest segment register into the guest-state area in the VMCS.
2623 *
2624 * @returns VBox status code.
2625 * @param pVCpu The cross context virtual CPU structure.
2626 * @param pVmcsInfo The VMCS info. object.
2627 * @param iSegReg The segment register number (X86_SREG_XXX).
2628 * @param pSelReg Pointer to the segment selector.
2629 *
2630 * @remarks No-long-jump zone!!!
2631 */
2632static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2633{
2634 Assert(iSegReg < X86_SREG_COUNT);
2635
2636 uint32_t u32Access = pSelReg->Attr.u;
2637#ifndef IN_NEM_DARWIN
2638 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2639#endif
2640 {
2641 /*
2642         * The way to differentiate whether this is really a null selector or just a selector
2643         * loaded with 0 in real-mode is by using the segment attributes. A selector loaded in
2644         * real-mode with the value 0 is valid and usable in protected-mode, and we should -not-
2645         * mark it as an unusable segment. Both the recompiler & VT-x ensure that NULL selectors
2646         * loaded in protected-mode have their attributes set to 0.
2647 */
2648 if (u32Access)
2649 { }
2650 else
2651 u32Access = X86DESCATTR_UNUSABLE;
2652 }
2653#ifndef IN_NEM_DARWIN
2654 else
2655 {
2656 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2657 u32Access = 0xf3;
2658 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2659 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2660 RT_NOREF_PV(pVCpu);
2661 }
2662#else
2663 RT_NOREF(pVmcsInfo);
2664#endif
2665
2666 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2667 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2668              ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2669
2670 /*
2671 * Commit it to the VMCS.
2672 */
2673 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2674 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2675 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2676 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2677 return VINF_SUCCESS;
2678}
2679
2680
2681/**
2682 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2683 * area in the VMCS.
2684 *
2685 * @returns VBox status code.
2686 * @param pVCpu The cross context virtual CPU structure.
2687 * @param pVmxTransient The VMX-transient structure.
2688 *
2689 * @remarks Will import guest CR0 on strict builds during validation of
2690 * segments.
2691 * @remarks No-long-jump zone!!!
2692 */
2693static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2694{
2695 int rc = VERR_INTERNAL_ERROR_5;
2696#ifndef IN_NEM_DARWIN
2697 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2698#endif
2699 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2700 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2701#ifndef IN_NEM_DARWIN
2702 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2703#endif
2704
2705 /*
2706 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2707 */
2708 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2709 {
2710 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2711 {
2712 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2713#ifndef IN_NEM_DARWIN
2714 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2715 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2716#endif
2717 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2718 AssertRC(rc);
2719 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2720 }
2721
2722 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2723 {
2724 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2725#ifndef IN_NEM_DARWIN
2726 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2727 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2728#endif
2729 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2730 AssertRC(rc);
2731 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2732 }
2733
2734 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2735 {
2736 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2737#ifndef IN_NEM_DARWIN
2738 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2739 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2740#endif
2741 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2742 AssertRC(rc);
2743 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2744 }
2745
2746 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2747 {
2748 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2749#ifndef IN_NEM_DARWIN
2750 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2751 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2752#endif
2753 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2754 AssertRC(rc);
2755 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2756 }
2757
2758 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2759 {
2760 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2761#ifndef IN_NEM_DARWIN
2762 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2763 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2764#endif
2765 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2766 AssertRC(rc);
2767 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2768 }
2769
2770 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2771 {
2772 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2773#ifndef IN_NEM_DARWIN
2774 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2775 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2776#endif
2777 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2778 AssertRC(rc);
2779 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2780 }
2781
2782#ifdef VBOX_STRICT
2783 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2784#endif
2785 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2786 pCtx->cs.Attr.u));
2787 }
2788
2789 /*
2790 * Guest TR.
2791 */
2792 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2793 {
2794 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2795
2796 /*
2797 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2798 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2799 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2800 */
2801 uint16_t u16Sel;
2802 uint32_t u32Limit;
2803 uint64_t u64Base;
2804 uint32_t u32AccessRights;
2805#ifndef IN_NEM_DARWIN
2806 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2807#endif
2808 {
2809 u16Sel = pCtx->tr.Sel;
2810 u32Limit = pCtx->tr.u32Limit;
2811 u64Base = pCtx->tr.u64Base;
2812 u32AccessRights = pCtx->tr.Attr.u;
2813 }
2814#ifndef IN_NEM_DARWIN
2815 else
2816 {
2817 Assert(!pVmxTransient->fIsNestedGuest);
2818 Assert(pVM->hm.s.vmx.pRealModeTSS);
2819 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2820
2821 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2822 RTGCPHYS GCPhys;
2823 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2824 AssertRCReturn(rc, rc);
2825
2826 X86DESCATTR DescAttr;
2827 DescAttr.u = 0;
2828 DescAttr.n.u1Present = 1;
2829 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2830
2831 u16Sel = 0;
2832 u32Limit = HM_VTX_TSS_SIZE;
2833 u64Base = GCPhys;
2834 u32AccessRights = DescAttr.u;
2835 }
2836#endif
2837
2838 /* Validate. */
2839 Assert(!(u16Sel & RT_BIT(2)));
2840 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2841 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2842 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2843 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2844 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2845 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2846 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2847 Assert( (u32Limit & 0xfff) == 0xfff
2848 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2849 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2850 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2851
2852 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2853 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2854 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2855 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2856
2857 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2858 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2859 }
2860
2861 /*
2862 * Guest GDTR.
2863 */
2864 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2865 {
2866 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2867
2868 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2869 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2870
2871 /* Validate. */
2872 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2873
2874 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2875 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2876 }
2877
2878 /*
2879 * Guest LDTR.
2880 */
2881 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2882 {
2883 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2884
2885 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2886 uint32_t u32Access;
2887 if ( !pVmxTransient->fIsNestedGuest
2888 && !pCtx->ldtr.Attr.u)
2889 u32Access = X86DESCATTR_UNUSABLE;
2890 else
2891 u32Access = pCtx->ldtr.Attr.u;
2892
2893 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2894 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2895 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2896 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2897
2898 /* Validate. */
2899 if (!(u32Access & X86DESCATTR_UNUSABLE))
2900 {
2901 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2902 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2903 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2904 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2905 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2906 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2907 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2908 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2909 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2910 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2911 }
2912
2913 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2914 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2915 }
2916
2917 /*
2918 * Guest IDTR.
2919 */
2920 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2921 {
2922 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2923
2924 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2925 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2926
2927 /* Validate. */
2928 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2929
2930 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2931 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2932 }
2933
2934 return VINF_SUCCESS;
2935}
2936
2937
2938/**
2939 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2940 * VM-exit interruption info type.
2941 *
2942 * @returns The IEM exception flags.
2943 * @param uVector The event vector.
2944 * @param uVmxEventType The VMX event type.
2945 *
2946 * @remarks This function currently only constructs flags required for
2947 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2948 * and CR2 aspects of an exception are not included).
2949 */
2950static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2951{
2952 uint32_t fIemXcptFlags;
2953 switch (uVmxEventType)
2954 {
2955 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2956 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2957 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2958 break;
2959
2960 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2961 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2962 break;
2963
2964 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2965 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2966 break;
2967
2968 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2969 {
2970 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2971 if (uVector == X86_XCPT_BP)
2972 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2973 else if (uVector == X86_XCPT_OF)
2974 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
2975 else
2976 {
2977 fIemXcptFlags = 0;
2978 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
2979 }
2980 break;
2981 }
2982
2983 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
2984 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2985 break;
2986
2987 default:
2988 fIemXcptFlags = 0;
2989 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
2990 break;
2991 }
2992 return fIemXcptFlags;
2993}
2994
2995
2996/**
2997 * Sets an event as a pending event to be injected into the guest.
2998 *
2999 * @param pVCpu The cross context virtual CPU structure.
3000 * @param u32IntInfo The VM-entry interruption-information field.
3001 * @param cbInstr The VM-entry instruction length in bytes (for
3002 * software interrupts, exceptions and privileged
3003 * software exceptions).
3004 * @param u32ErrCode The VM-entry exception error code.
3005 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3006 * page-fault.
3007 */
3008DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3009 RTGCUINTPTR GCPtrFaultAddress)
3010{
3011 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3012 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3013 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3014 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3015 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3016 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3017}
3018
3019
3020/**
3021 * Sets an external interrupt as pending-for-injection into the VM.
3022 *
3023 * @param pVCpu The cross context virtual CPU structure.
3024 * @param u8Interrupt The external interrupt vector.
3025 */
3026DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3027{
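    /* Note: The vector, type, error-code-valid and valid bits occupy the same
       positions in the VM-entry and VM-exit interruption-information fields and
       the type encodings are identical, so the occasional use of VMX_BF_EXIT_* /
       VMX_EXIT_INT_INFO_* constants in this and the following setters is
       equivalent to using the VMX_BF_ENTRY_* counterparts. */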
3028 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3029 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3030 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3031 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3032 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3033}
3034
3035
3036/**
3037 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3038 *
3039 * @param pVCpu The cross context virtual CPU structure.
3040 */
3041DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3042{
3043 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3044 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3045 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3046 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3047 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3048}
3049
3050
3051/**
3052 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3053 *
3054 * @param pVCpu The cross context virtual CPU structure.
3055 */
3056DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3057{
3058 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3059 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3060 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3061 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3062 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3063}
3064
3065
3066/**
3067 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3068 *
3069 * @param pVCpu The cross context virtual CPU structure.
3070 */
3071DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3072{
3073 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3074 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3075 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3076 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3077 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3078}
3079
3080
3081/**
3082 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3083 *
3084 * @param pVCpu The cross context virtual CPU structure.
3085 */
3086DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3087{
3088 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3089 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3090 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3091 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3092 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3093}
3094
3095
3096#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3097/**
3098 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3099 *
3100 * @param pVCpu The cross context virtual CPU structure.
3101 * @param u32ErrCode The error code for the general-protection exception.
3102 */
3103DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3104{
3105 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3106 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3107 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3108 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3109 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3110}
3111
3112
3113/**
3114 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3115 *
3116 * @param pVCpu The cross context virtual CPU structure.
3117 * @param u32ErrCode The error code for the stack exception.
3118 */
3119DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3120{
3121 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3122 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3123 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3124 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3125 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3126}
3127#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3128
3129
3130/**
3131 * Fixes up attributes for the specified segment register.
3132 *
3133 * @param pVCpu The cross context virtual CPU structure.
3134 * @param pSelReg The segment register that needs fixing.
3135 * @param pszRegName The register name (for logging and assertions).
3136 */
3137static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3138{
3139 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3140
3141 /*
3142 * If VT-x marks the segment as unusable, most other bits remain undefined:
3143 * - For CS the L, D and G bits have meaning.
3144 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3145 * - For the remaining data segments no bits are defined.
3146 *
3147 * The present bit and the unusable bit have been observed to be set at the
3148 * same time (the selector was supposed to be invalid as we started executing
3149 * a V8086 interrupt in ring-0).
3150 *
3151 * What is important for the rest of the VBox code is that the P bit is
3152 * cleared. Some of the other VBox code recognizes the unusable bit, but
3153 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3154 * safe side here, we'll strip off P and other bits we don't care about. If
3155 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3156 *
3157 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3158 */
3159#ifdef VBOX_STRICT
3160 uint32_t const uAttr = pSelReg->Attr.u;
3161#endif
3162
3163 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3164 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3165 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3166
3167#ifdef VBOX_STRICT
3168# ifndef IN_NEM_DARWIN
3169 VMMRZCallRing3Disable(pVCpu);
3170# endif
3171 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3172# ifdef DEBUG_bird
3173 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3174 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3175 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3176# endif
3177# ifndef IN_NEM_DARWIN
3178 VMMRZCallRing3Enable(pVCpu);
3179# endif
3180 NOREF(uAttr);
3181#endif
3182 RT_NOREF2(pVCpu, pszRegName);
3183}
3184
3185
3186/**
3187 * Imports a guest segment register from the current VMCS into the guest-CPU
3188 * context.
3189 *
3190 * @param pVCpu The cross context virtual CPU structure.
3191 * @tparam a_iSegReg The segment register number (X86_SREG_XXX).
3192 *
3193 * @remarks Called with interrupts and/or preemption disabled.
3194 */
3195template<uint32_t const a_iSegReg>
3196DECLINLINE(void) vmxHCImportGuestSegReg(PVMCPUCC pVCpu)
3197{
3198 AssertCompile(a_iSegReg < X86_SREG_COUNT);
3199 /* Check that the macros we depend upon here and in the corresponding export function work: */
3200#define MY_SEG_VMCS_FIELD(a_FieldPrefix, a_FieldSuff) \
3201 ( a_iSegReg == X86_SREG_ES ? a_FieldPrefix ## ES ## a_FieldSuff \
3202 : a_iSegReg == X86_SREG_CS ? a_FieldPrefix ## CS ## a_FieldSuff \
3203 : a_iSegReg == X86_SREG_SS ? a_FieldPrefix ## SS ## a_FieldSuff \
3204 : a_iSegReg == X86_SREG_DS ? a_FieldPrefix ## DS ## a_FieldSuff \
3205 : a_iSegReg == X86_SREG_FS ? a_FieldPrefix ## FS ## a_FieldSuff \
3206 : a_iSegReg == X86_SREG_GS ? a_FieldPrefix ## GS ## a_FieldSuff : 0)
3207 AssertCompile(VMX_VMCS_GUEST_SEG_BASE(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS_GUEST_,_BASE));
3208 AssertCompile(VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS16_GUEST_,_SEL));
3209 AssertCompile(VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_LIMIT));
3210 AssertCompile(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_ACCESS_RIGHTS));
3211
3212 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[a_iSegReg];
3213
3214 uint16_t u16Sel;
3215 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg), &u16Sel); AssertRC(rc);
3216 pSelReg->Sel = u16Sel;
3217 pSelReg->ValidSel = u16Sel;
3218
3219 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3220 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(a_iSegReg), &pSelReg->u64Base); AssertRC(rc);
3221
3222 uint32_t u32Attr;
3223 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg), &u32Attr); AssertRC(rc);
3224 pSelReg->Attr.u = u32Attr;
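    /* The string literal below packs the six register names as 3-byte entries
       ("ES\0", "CS\0", ..., "GS"), so indexing it by a_iSegReg * 3 yields the
       NUL-terminated name of the register being imported (used for logging only). */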
3225 if (u32Attr & X86DESCATTR_UNUSABLE)
3226 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + a_iSegReg * 3);
3227
3228 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3229}
3230
3231
3232/**
3233 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3234 *
3235 * @param pVCpu The cross context virtual CPU structure.
3236 *
3237 * @remarks Called with interrupts and/or preemption disabled.
3238 */
3239DECLINLINE(void) vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3240{
3241 uint16_t u16Sel;
3242 uint64_t u64Base;
3243 uint32_t u32Limit, u32Attr;
3244 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3245 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3246 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3247 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3248
3249 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3250 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3251 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3252 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3253 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3254 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3255 if (u32Attr & X86DESCATTR_UNUSABLE)
3256 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3257}
3258
3259
3260/**
3261 * Imports the guest TR from the current VMCS into the guest-CPU context.
3262 *
3263 * @param pVCpu The cross context virtual CPU structure.
3264 *
3265 * @remarks Called with interrupts and/or preemption disabled.
3266 */
3267DECLINLINE(void) vmxHCImportGuestTr(PVMCPUCC pVCpu)
3268{
3269 uint16_t u16Sel;
3270 uint64_t u64Base;
3271 uint32_t u32Limit, u32Attr;
3272 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3273 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3274 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3275 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3276
3277 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3278 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3279 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3280 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3281 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3282 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3283 /* TR is the only selector that can never be unusable. */
3284 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3285}
3286
3287
3288/**
3289 * Core: Imports the guest RIP from the VMCS back into the guest-CPU context.
3290 *
3291 * @returns The RIP value.
3292 * @param pVCpu The cross context virtual CPU structure.
3293 *
3294 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3295 * @remarks Do -not- call this function directly!
3296 */
3297DECL_FORCE_INLINE(uint64_t) vmxHCImportGuestCoreRip(PVMCPUCC pVCpu)
3298{
3299 uint64_t u64Val;
3300 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3301 AssertRC(rc);
3302
3303 pVCpu->cpum.GstCtx.rip = u64Val;
3304
3305 return u64Val;
3306}
3307
3308
3309/**
3310 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3311 *
3312 * @param pVCpu The cross context virtual CPU structure.
3313 *
3314 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3315 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3316 * instead!!!
3317 */
3318DECLINLINE(void) vmxHCImportGuestRip(PVMCPUCC pVCpu)
3319{
3320 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
3321 {
3322 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3323 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3324 }
3325}
3326
3327
3328/**
3329 * Core: Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3330 *
3331 * @param pVCpu The cross context virtual CPU structure.
3332 * @param pVmcsInfo The VMCS info. object.
3333 *
3334 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3335 * @remarks Do -not- call this function directly!
3336 */
3337DECL_FORCE_INLINE(void) vmxHCImportGuestCoreRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3338{
3339 uint64_t fRFlags;
3340 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &fRFlags);
3341 AssertRC(rc);
3342
3343 Assert((fRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
3344 Assert((fRFlags & ~(uint64_t)(X86_EFL_1 | X86_EFL_LIVE_MASK)) == 0);
3345
3346 pVCpu->cpum.GstCtx.rflags.u = fRFlags;
3347#ifndef IN_NEM_DARWIN
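    /* With the real-on-v86 trick the guest is really in real mode: the VM and
       IOPL bits read from the VMCS stem from the v8086 emulation, so clear VM
       and restore the IOPL the guest had when v8086 execution was set up. */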
3348 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3349 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
3350 { /* most likely */ }
3351 else
3352 {
3353 pVCpu->cpum.GstCtx.eflags.Bits.u1VM = 0;
3354 pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3355 }
3356#else
3357 RT_NOREF(pVmcsInfo);
3358#endif
3359}
3360
3361
3362/**
3363 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3364 *
3365 * @param pVCpu The cross context virtual CPU structure.
3366 * @param pVmcsInfo The VMCS info. object.
3367 *
3368 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3369 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3370 * instead!!!
3371 */
3372DECLINLINE(void) vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3373{
3374 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RFLAGS)
3375 {
3376 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3377 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3378 }
3379}
3380
3381
3382/**
3383 * Worker for vmxHCImportGuestIntrState that handles the case where any of the
3384 * relevant VMX_VMCS32_GUEST_INT_STATE bits are set.
3385 */
3386DECL_NO_INLINE(static,void) vmxHCImportGuestIntrStateSlow(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fGstIntState)
3387{
3388 /*
3389 * We must import RIP here to set our EM interrupt-inhibited state.
3390 * We also import RFLAGS as our code that evaluates pending interrupts
3391 * before VM-entry requires it.
3392 */
3393 vmxHCImportGuestRip(pVCpu);
3394 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3395
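    /* Translate the VMX MOV-SS/STI blocking bits into CPUM's interrupt-shadow
       state (anchored at the current RIP) and mirror the NMI-blocking bit. */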
3396 CPUMUpdateInterruptShadowSsStiEx(&pVCpu->cpum.GstCtx,
3397 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
3398 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
3399 pVCpu->cpum.GstCtx.rip);
3400 CPUMUpdateInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx, RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
3401}
3402
3403
3404/**
3405 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3406 * context.
3407 *
3408 * @note May import RIP and RFLAGS if interrupts or NMIs are blocked.
3409 *
3410 * @param pVCpu The cross context virtual CPU structure.
3411 * @param pVmcsInfo The VMCS info. object.
3412 *
3413 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3414 * do not log!
3415 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3416 * instead!!!
3417 */
3418DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3419{
3420 uint32_t u32Val;
3421 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3422 if (!u32Val)
3423 {
3424 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
3425 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
3426 }
3427 else
3428 vmxHCImportGuestIntrStateSlow(pVCpu, pVmcsInfo, u32Val);
3429}
3430
3431
3432/**
3433 * Worker for VMXR0ImportStateOnDemand.
3434 *
3435 * @returns VBox status code.
3436 * @param pVCpu The cross context virtual CPU structure.
3437 * @param pVmcsInfo The VMCS info. object.
3438 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3439 */
3440static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3441{
3442 int rc = VINF_SUCCESS;
3443 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3444 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3445 uint32_t u32Val;
3446
3447 /*
3448 * Note! This is a hack to work around a mysterious BSOD observed with release builds
3449 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3450 * neither are other host platforms.
3451 *
3452 * Committing this temporarily as it prevents BSOD.
3453 *
3454 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3455 */
3456#ifdef RT_OS_WINDOWS
3457 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3458 return VERR_HM_IPE_1;
3459#endif
3460
3461 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3462
3463#ifndef IN_NEM_DARWIN
3464 /*
3465 * We disable interrupts to make the updating of the state and in particular
3466 * the fExtrn modification atomic with respect to preemption hooks.
3467 */
3468 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3469#endif
3470
3471 fWhat &= pCtx->fExtrn;
3472 if (fWhat)
3473 {
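    /* The do-while(0) construct below only exists so that the nested-hwvirt
       (CPUMCTX_EXTRN_HWVIRT) block at the end can 'break' out early on failure. */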
3474 do
3475 {
3476 if (fWhat & CPUMCTX_EXTRN_RIP)
3477 vmxHCImportGuestRip(pVCpu);
3478
3479 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3480 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3481
3482 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3483 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3484 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3485
3486 if (fWhat & CPUMCTX_EXTRN_RSP)
3487 {
3488 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3489 AssertRC(rc);
3490 }
3491
3492 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3493 {
3494 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3495#ifndef IN_NEM_DARWIN
3496 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3497#else
3498 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3499#endif
3500 if (fWhat & CPUMCTX_EXTRN_CS)
3501 {
3502 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3503 vmxHCImportGuestRip(pVCpu); /** @todo WTF? */
3504 if (fRealOnV86Active)
3505 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3506 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3507 }
3508 if (fWhat & CPUMCTX_EXTRN_SS)
3509 {
3510 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3511 if (fRealOnV86Active)
3512 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3513 }
3514 if (fWhat & CPUMCTX_EXTRN_DS)
3515 {
3516 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3517 if (fRealOnV86Active)
3518 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3519 }
3520 if (fWhat & CPUMCTX_EXTRN_ES)
3521 {
3522 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3523 if (fRealOnV86Active)
3524 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3525 }
3526 if (fWhat & CPUMCTX_EXTRN_FS)
3527 {
3528 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3529 if (fRealOnV86Active)
3530 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3531 }
3532 if (fWhat & CPUMCTX_EXTRN_GS)
3533 {
3534 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3535 if (fRealOnV86Active)
3536 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3537 }
3538 }
3539
3540 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3541 {
3542 if (fWhat & CPUMCTX_EXTRN_LDTR)
3543 vmxHCImportGuestLdtr(pVCpu);
3544
3545 if (fWhat & CPUMCTX_EXTRN_GDTR)
3546 {
3547 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3548 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3549 pCtx->gdtr.cbGdt = u32Val;
3550 }
3551
3552 /* Guest IDTR. */
3553 if (fWhat & CPUMCTX_EXTRN_IDTR)
3554 {
3555 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3556 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3557 pCtx->idtr.cbIdt = u32Val;
3558 }
3559
3560 /* Guest TR. */
3561 if (fWhat & CPUMCTX_EXTRN_TR)
3562 {
3563#ifndef IN_NEM_DARWIN
3564 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3565 don't need to import that one. */
3566 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3567#endif
3568 vmxHCImportGuestTr(pVCpu);
3569 }
3570 }
3571
3572 if (fWhat & CPUMCTX_EXTRN_DR7)
3573 {
3574#ifndef IN_NEM_DARWIN
3575 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3576#endif
3577 {
3578 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3579 AssertRC(rc);
3580 }
3581 }
3582
3583 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3584 {
3585 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3586 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3587 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3588 pCtx->SysEnter.cs = u32Val;
3589 }
3590
3591#ifndef IN_NEM_DARWIN
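    /* KERNEL_GS_BASE and the SYSCALL MSRs are swapped lazily (see
       VMX_LAZY_MSRS_LOADED_GUEST); when the guest values are currently loaded on
       the CPU they are read back with ASMRdMsr since they are not part of the
       VMCS guest-state area. The remaining MSRs below come from the VM-exit
       MSR-store area. */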
3592 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3593 {
3594 if ( pVM->hmr0.s.fAllow64BitGuests
3595 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3596 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3597 }
3598
3599 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3600 {
3601 if ( pVM->hmr0.s.fAllow64BitGuests
3602 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3603 {
3604 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3605 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3606 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3607 }
3608 }
3609
3610 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3611 {
3612 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3613 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3614 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3615 Assert(pMsrs);
3616 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3617 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3618 for (uint32_t i = 0; i < cMsrs; i++)
3619 {
3620 uint32_t const idMsr = pMsrs[i].u32Msr;
3621 switch (idMsr)
3622 {
3623 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3624 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3625 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3626 default:
3627 {
3628 uint32_t idxLbrMsr;
3629 if (VM_IS_VMX_LBR(pVM))
3630 {
3631 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3632 {
3633 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3634 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3635 break;
3636 }
3637 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3638 {
3639 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3640 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3641 break;
3642 }
3643 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3644 {
3645 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3646 break;
3647 }
3648 /* Fallthru (no break) */
3649 }
3650 pCtx->fExtrn = 0;
3651 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3652 ASMSetFlags(fEFlags);
3653 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3654 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3655 }
3656 }
3657 }
3658 }
3659#endif
3660
3661 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3662 {
3663 if (fWhat & CPUMCTX_EXTRN_CR0)
3664 {
3665 uint64_t u64Cr0;
3666 uint64_t u64Shadow;
3667 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3668 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3669#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3670 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3671 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3672#else
3673 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3674 {
3675 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3676 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3677 }
3678 else
3679 {
3680 /*
3681 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3682 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3683 * re-construct CR0. See @bugref{9180#c95} for details.
3684 */
3685 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3686 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3687 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3688 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3689 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3690 }
3691#endif
3692#ifndef IN_NEM_DARWIN
3693 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3694#endif
3695 CPUMSetGuestCR0(pVCpu, u64Cr0);
3696#ifndef IN_NEM_DARWIN
3697 VMMRZCallRing3Enable(pVCpu);
3698#endif
3699 }
3700
3701 if (fWhat & CPUMCTX_EXTRN_CR4)
3702 {
3703 uint64_t u64Cr4;
3704 uint64_t u64Shadow;
3705 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3706 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3707#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3708 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3709 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3710#else
3711 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3712 {
3713 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3714 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3715 }
3716 else
3717 {
3718 /*
3719 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3720 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3721 * re-construct CR4. See @bugref{9180#c95} for details.
3722 */
3723 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3724 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3725 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3726 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3727 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3728 }
3729#endif
3730 pCtx->cr4 = u64Cr4;
3731 }
3732
3733 if (fWhat & CPUMCTX_EXTRN_CR3)
3734 {
3735 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3736 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3737 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3738 && CPUMIsGuestPagingEnabledEx(pCtx)))
3739 {
3740 uint64_t u64Cr3;
3741 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3742 if (pCtx->cr3 != u64Cr3)
3743 {
3744 pCtx->cr3 = u64Cr3;
3745 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3746 }
3747
3748 /*
3749 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3750 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3751 */
3752 if (CPUMIsGuestInPAEModeEx(pCtx))
3753 {
3754 X86PDPE aPaePdpes[4];
3755 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3756 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3757 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3758 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3759 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3760 {
3761 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3762 /* PGM now updates PAE PDPTEs while updating CR3. */
3763 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3764 }
3765 }
3766 }
3767 }
3768 }
3769
3770#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3771 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3772 {
3773 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3774 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3775 {
3776 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3777 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3778 if (RT_SUCCESS(rc))
3779 { /* likely */ }
3780 else
3781 break;
3782 }
3783 }
3784#endif
3785 } while (0);
3786
3787 if (RT_SUCCESS(rc))
3788 {
3789 /* Update fExtrn. */
3790 pCtx->fExtrn &= ~fWhat;
3791
3792 /* If everything has been imported, clear the HM keeper bit. */
3793 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3794 {
3795#ifndef IN_NEM_DARWIN
3796 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3797#else
3798 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3799#endif
3800 Assert(!pCtx->fExtrn);
3801 }
3802 }
3803 }
3804#ifndef IN_NEM_DARWIN
3805 else
3806 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3807
3808 /*
3809 * Restore interrupts.
3810 */
3811 ASMSetFlags(fEFlags);
3812#endif
3813
3814 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3815
3816 if (RT_SUCCESS(rc))
3817 { /* likely */ }
3818 else
3819 return rc;
3820
3821 /*
3822 * Honor any pending CR3 updates.
3823 *
3824 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3825 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3826 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3827 *
3828 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3829 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3830 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3831 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3832 *
3833 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3834 *
3835 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3836 */
3837 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3838#ifndef IN_NEM_DARWIN
3839 && VMMRZCallRing3IsEnabled(pVCpu)
3840#endif
3841 )
3842 {
3843 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3844 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3845 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3846 }
3847
3848 return VINF_SUCCESS;
3849}
3850
3851
3852/**
3853 * Internal state fetcher, inner version where we fetch all of a_fWhat.
3854 *
3855 * @returns VBox status code.
3856 * @param pVCpu The cross context virtual CPU structure.
3857 * @param pVmcsInfo The VMCS info. object.
3858 * @param fEFlags Saved EFLAGS for restoring the interrupt flag. Ignored
3859 * in NEM/darwin context.
3860 * @tparam a_fWhat What to import, zero or more bits from
3861 * HMVMX_CPUMCTX_EXTRN_ALL.
3862 */
3863template<uint64_t const a_fWhat>
3864static int vmxHCImportGuestStateInner(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3865{
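    /* All a_fWhat tests below are compile-time constant, so the compiler can
       discard the code for any state this instantiation does not request. */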
3866 Assert(a_fWhat != 0); /* No AssertCompile as the assertion probably kicks in before the compiler (clang) discards it. */
3867 AssertCompile(!(a_fWhat & ~HMVMX_CPUMCTX_EXTRN_ALL));
3868 Assert( (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == a_fWhat
3869 || (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == (a_fWhat & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
3870
3871 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3872
3873 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3874
3875 /* RIP and RFLAGS may have been imported already by the post exit code
3876 together with the CPUMCTX_EXTRN_INHIBIT_INT/NMI state, in which case the
3877 fExtrn check below lets us skip this block. */
3878 if ( (a_fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3879 && pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3880 {
3881 if (a_fWhat & CPUMCTX_EXTRN_RFLAGS)
3882 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3883
3884 if (a_fWhat & CPUMCTX_EXTRN_RIP)
3885 {
3886 if (!(a_fWhat & CPUMCTX_EXTRN_CS))
3887 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3888 else
3889 vmxHCImportGuestCoreRip(pVCpu);
3890 }
3891 }
3892
3893 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3894 if (a_fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3895 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3896
3897 if (a_fWhat & (CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TR))
3898 {
3899 if (a_fWhat & CPUMCTX_EXTRN_CS)
3900 {
3901 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3902 /** @todo try get rid of this carp, it smells and is probably never ever
3903 * used: */
3904 if ( !(a_fWhat & CPUMCTX_EXTRN_RIP)
3905 && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP))
3906 {
3907 vmxHCImportGuestCoreRip(pVCpu);
3908 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3909 }
3910 EMHistoryUpdatePC(pVCpu, pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, true /* fFlattened */);
3911 }
3912 if (a_fWhat & CPUMCTX_EXTRN_SS)
3913 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3914 if (a_fWhat & CPUMCTX_EXTRN_DS)
3915 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3916 if (a_fWhat & CPUMCTX_EXTRN_ES)
3917 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3918 if (a_fWhat & CPUMCTX_EXTRN_FS)
3919 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3920 if (a_fWhat & CPUMCTX_EXTRN_GS)
3921 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3922
3923 /* Guest TR.
3924 Real-mode emulation using virtual-8086 mode has the fake TSS
3925 (pRealModeTSS) in TR, don't need to import that one. */
3926#ifndef IN_NEM_DARWIN
3927 PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmcsInfo->pShared;
3928 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3929 if ((a_fWhat & CPUMCTX_EXTRN_TR) && !fRealOnV86Active)
3930#else
3931 if (a_fWhat & CPUMCTX_EXTRN_TR)
3932#endif
3933 vmxHCImportGuestTr(pVCpu);
3934
3935#ifndef IN_NEM_DARWIN /* NEM/Darwin: HV supports only unrestricted guest execution. */
3936 if (fRealOnV86Active)
3937 {
3938 if (a_fWhat & CPUMCTX_EXTRN_CS)
3939 pVCpu->cpum.GstCtx.cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3940 if (a_fWhat & CPUMCTX_EXTRN_SS)
3941 pVCpu->cpum.GstCtx.ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3942 if (a_fWhat & CPUMCTX_EXTRN_DS)
3943 pVCpu->cpum.GstCtx.ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3944 if (a_fWhat & CPUMCTX_EXTRN_ES)
3945 pVCpu->cpum.GstCtx.es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3946 if (a_fWhat & CPUMCTX_EXTRN_FS)
3947 pVCpu->cpum.GstCtx.fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3948 if (a_fWhat & CPUMCTX_EXTRN_GS)
3949 pVCpu->cpum.GstCtx.gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3950 }
3951#endif
3952 }
3953
3954 if (a_fWhat & CPUMCTX_EXTRN_RSP)
3955 {
3956 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pVCpu->cpum.GstCtx.rsp);
3957 AssertRC(rc);
3958 }
3959
3960 if (a_fWhat & CPUMCTX_EXTRN_LDTR)
3961 vmxHCImportGuestLdtr(pVCpu);
3962
3963 if (a_fWhat & CPUMCTX_EXTRN_GDTR)
3964 {
3965 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pVCpu->cpum.GstCtx.gdtr.pGdt); AssertRC(rc1);
3966 uint32_t u32Val;
3967 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc2);
3968 pVCpu->cpum.GstCtx.gdtr.cbGdt = (uint16_t)u32Val;
3969 }
3970
3971 /* Guest IDTR. */
3972 if (a_fWhat & CPUMCTX_EXTRN_IDTR)
3973 {
3974 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pVCpu->cpum.GstCtx.idtr.pIdt); AssertRC(rc1);
3975 uint32_t u32Val;
3976 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc2);
3977 pVCpu->cpum.GstCtx.idtr.cbIdt = (uint64_t)u32Val;
3978 }
3979
3980 if (a_fWhat & CPUMCTX_EXTRN_DR7)
3981 {
3982#ifndef IN_NEM_DARWIN
3983 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3984#endif
3985 {
3986 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pVCpu->cpum.GstCtx.dr[7]);
3987 AssertRC(rc);
3988 }
3989 }
3990
3991 if (a_fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3992 {
3993 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pVCpu->cpum.GstCtx.SysEnter.eip); AssertRC(rc1);
3994 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pVCpu->cpum.GstCtx.SysEnter.esp); AssertRC(rc2);
3995 uint32_t u32Val;
3996 int const rc3 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc3);
3997 pVCpu->cpum.GstCtx.SysEnter.cs = u32Val;
3998 }
3999
4000#ifndef IN_NEM_DARWIN
4001 if (a_fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
4002 {
4003 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4004 && pVM->hmr0.s.fAllow64BitGuests)
4005 pVCpu->cpum.GstCtx.msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
4006 }
4007
4008 if (a_fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
4009 {
4010 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4011 && pVM->hmr0.s.fAllow64BitGuests)
4012 {
4013 pVCpu->cpum.GstCtx.msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
4014 pVCpu->cpum.GstCtx.msrSTAR = ASMRdMsr(MSR_K6_STAR);
4015 pVCpu->cpum.GstCtx.msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
4016 }
4017 }
4018
4019 if (a_fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
4020 {
4021 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
4022 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
4023 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
4024 Assert(pMsrs);
4025 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
4026 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
4027 for (uint32_t i = 0; i < cMsrs; i++)
4028 {
4029 uint32_t const idMsr = pMsrs[i].u32Msr;
4030 switch (idMsr)
4031 {
4032 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
4033 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
4034 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
4035 default:
4036 {
4037 uint32_t idxLbrMsr;
4038 if (VM_IS_VMX_LBR(pVM))
4039 {
4040 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
4041 {
4042 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
4043 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
4044 break;
4045 }
4046 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
4047 {
4048 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
4049 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
4050 break;
4051 }
4052 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
4053 {
4054 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
4055 break;
4056 }
4057 }
4058 pVCpu->cpum.GstCtx.fExtrn = 0;
4059 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
4060 ASMSetFlags(fEFlags);
4061 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
4062 return VERR_HM_UNEXPECTED_LD_ST_MSR;
4063 }
4064 }
4065 }
4066 }
4067#endif
4068
4069 if (a_fWhat & CPUMCTX_EXTRN_CR0)
4070 {
4071 uint64_t u64Cr0;
4072 uint64_t u64Shadow;
4073 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc1);
4074 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc2);
4075#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
4076 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4077 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
4078#else
4079 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4080 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4081 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
4082 else
4083 {
4084 /*
4085 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
4086 * the nested-guest using hardware-assisted VMX. Accordingly we need to
4087 * re-construct CR0. See @bugref{9180#c95} for details.
4088 */
4089 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
4090 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4091 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4092 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
4093 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
4094 }
4095#endif
4096#ifndef IN_NEM_DARWIN
4097 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
4098#endif
4099 CPUMSetGuestCR0(pVCpu, u64Cr0);
4100#ifndef IN_NEM_DARWIN
4101 VMMRZCallRing3Enable(pVCpu);
4102#endif
4103 }
4104
4105 if (a_fWhat & CPUMCTX_EXTRN_CR4)
4106 {
4107 uint64_t u64Cr4;
4108 uint64_t u64Shadow;
4109 int rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc1);
4110 int rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc2);
4111#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
4112 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4113 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
4114#else
4115 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4116 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4117 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
4118 else
4119 {
4120 /*
4121 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
4122 * the nested-guest using hardware-assisted VMX. Accordingly we need to
4123 * re-construct CR4. See @bugref{9180#c95} for details.
4124 */
4125 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
4126 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4127 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4128 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
4129 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
4130 }
4131#endif
4132 pVCpu->cpum.GstCtx.cr4 = u64Cr4;
4133 }
4134
4135 if (a_fWhat & CPUMCTX_EXTRN_CR3)
4136 {
4137 /* CR0.PG bit changes are always intercepted, so it's up to date. */
4138 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
4139 || ( VM_IS_VMX_NESTED_PAGING(pVM)
4140 && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)))
4141 {
4142 uint64_t u64Cr3;
4143 int const rc0 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc0);
4144 if (pVCpu->cpum.GstCtx.cr3 != u64Cr3)
4145 {
4146 pVCpu->cpum.GstCtx.cr3 = u64Cr3;
4147 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
4148 }
4149
4150 /*
4151 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
4152 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
4153 */
4154 if (CPUMIsGuestInPAEModeEx(&pVCpu->cpum.GstCtx))
4155 {
4156 X86PDPE aPaePdpes[4];
4157 int const rc1 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc1);
4158 int const rc2 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc2);
4159 int const rc3 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc3);
4160 int const rc4 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc4);
4161 if (memcmp(&aPaePdpes[0], &pVCpu->cpum.GstCtx.aPaePdpes[0], sizeof(aPaePdpes)))
4162 {
4163 memcpy(&pVCpu->cpum.GstCtx.aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
4164 /* PGM now updates PAE PDPTEs while updating CR3. */
4165 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
4166 }
4167 }
4168 }
4169 }
4170
4171#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4172 if (a_fWhat & CPUMCTX_EXTRN_HWVIRT)
4173 {
4174 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4175 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4176 {
4177 Assert(CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx));
4178 int const rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
4179 AssertRCReturn(rc, rc);
4180 }
4181 }
4182#endif
4183
4184 /* Update fExtrn. */
4185 pVCpu->cpum.GstCtx.fExtrn &= ~a_fWhat;
4186
4187 /* If everything has been imported, clear the HM keeper bit. */
4188 if (!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
4189 {
4190#ifndef IN_NEM_DARWIN
4191 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
4192#else
4193 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
4194#endif
4195 Assert(!pVCpu->cpum.GstCtx.fExtrn);
4196 }
4197
4198 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
4199
4200 /*
4201 * Honor any pending CR3 updates.
4202 *
4203 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
4204 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
4205 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
4206 *
4207 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
4208 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
4209 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
4210 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
4211 *
4212 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
4213 *
4214 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
4215 */
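    /* Note: The two branches of the ternaries below differ only in the
       branch-prediction hint: when CR3 was not part of this import the
       force-flag is most likely clear and we can return immediately, whereas a
       CR3 import may well have just set it. */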
4216#ifndef IN_NEM_DARWIN
4217 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4218 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu))
4219 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu) )
4220 return VINF_SUCCESS;
4221 ASMSetFlags(fEFlags);
4222#else
4223 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4224 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
4225 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) )
4226 return VINF_SUCCESS;
4227 RT_NOREF_PV(fEFlags);
4228#endif
4229
4230 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
4231 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
4232 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
4233 return VINF_SUCCESS;
4234}
4235
4236
4237/**
4238 * Internal state fetcher.
4239 *
4240 * @returns VBox status code.
4241 * @param pVCpu The cross context virtual CPU structure.
4242 * @param pVmcsInfo The VMCS info. object.
4243 * @param pszCaller For logging.
4244 * @tparam a_fWhat What needs to be imported, CPUMCTX_EXTRN_XXX.
4245 * @tparam a_fDoneLocal What's ASSUMED to have been retrieved locally
4246 * already. This is ORed together with @a a_fWhat when
4247 * calculating what needs fetching (just for safety).
4248 * @tparam a_fDonePostExit What's ASSUMED to have been retrieved by
4249 * hmR0VmxPostRunGuest()/nemR3DarwinHandleExitCommon()
4250 * already. This is ORed together with @a a_fWhat when
4251 * calculating what needs fetching (just for safety).
4252 */
4253template<uint64_t const a_fWhat,
4254 uint64_t const a_fDoneLocal = 0,
4255 uint64_t const a_fDonePostExit = 0
4256#ifndef IN_NEM_DARWIN
4257 | CPUMCTX_EXTRN_INHIBIT_INT
4258 | CPUMCTX_EXTRN_INHIBIT_NMI
4259# if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
4260 | HMVMX_CPUMCTX_EXTRN_ALL
4261# elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
4262 | CPUMCTX_EXTRN_RFLAGS
4263# endif
4264#else /* IN_NEM_DARWIN */
4265 | CPUMCTX_EXTRN_ALL /** @todo optimize */
4266#endif /* IN_NEM_DARWIN */
4267>
4268DECLINLINE(int) vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, const char *pszCaller)
4269{
4270 RT_NOREF_PV(pszCaller);
4271 if ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL)
4272 {
4273#ifndef IN_NEM_DARWIN
4274 /*
4275 * We disable interrupts to make the updating of the state and in particular
4276 * the fExtrn modification atomic with respect to preemption hooks.
4277 */
4278 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
4279#else
4280 RTCCUINTREG const fEFlags = 0;
4281#endif
4282
4283 /*
4284 * We combine all three parameters and take the (probably) inlined optimized
4285 * code path for the new things specified in a_fWhat.
4286 *
4287 * As a tweak to deal with exits that have INHIBIT_INT/NMI active, causing
4288 * vmxHCImportGuestIntrState to automatically fetch both RIP & RFLAGS, we
4289 * also take the streamlined path when both of these are cleared in fExtrn
4290 * already. vmxHCImportGuestStateInner checks fExtrn before fetching. This
4291 * helps with MWAIT and HLT exits that always inhibit IRQs on many platforms.
4292 */
4293 uint64_t const fWhatToDo = pVCpu->cpum.GstCtx.fExtrn
4294 & ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL);
4295 if (RT_LIKELY( ( fWhatToDo == (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit))
4296 || fWhatToDo == ( a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)
4297 & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)) /* fetch with INHIBIT_INT/NMI */))
4298 && (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)) != 0 /* just in case */)
4299 {
4300 int const rc = vmxHCImportGuestStateInner< a_fWhat
4301 & HMVMX_CPUMCTX_EXTRN_ALL
4302 & ~(a_fDoneLocal | a_fDonePostExit)>(pVCpu, pVmcsInfo, fEFlags);
4303#ifndef IN_NEM_DARWIN
4304 ASMSetFlags(fEFlags);
4305#endif
4306 return rc;
4307 }
4308
4309#ifndef IN_NEM_DARWIN
4310 ASMSetFlags(fEFlags);
4311#endif
4312
4313 /*
4314 * We shouldn't normally get here, but it may happen when executing
4315 * in the debug run-loops. Typically, everything should already have
4316 * been fetched then. Otherwise call the fallback state import function.
4317 */
4318 if (fWhatToDo == 0)
4319 { /* hope the cause was the debug loop or something similar */ }
4320 else
4321 {
4322 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestStateFallback);
4323 Log11Func(("a_fWhat=%#RX64/%#RX64/%#RX64 fExtrn=%#RX64 => %#RX64 - Taking inefficient code path from %s!\n",
4324 a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL, a_fDoneLocal & HMVMX_CPUMCTX_EXTRN_ALL,
4325 a_fDonePostExit & HMVMX_CPUMCTX_EXTRN_ALL, pVCpu->cpum.GstCtx.fExtrn, fWhatToDo, pszCaller));
4326 return vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, a_fWhat | a_fDoneLocal | a_fDonePostExit);
4327 }
4328 }
4329 return VINF_SUCCESS;
4330}
4331
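/* Illustrative only (hypothetical call site, not part of the original file): a
   VM-exit handler typically pulls in just the state it needs, e.g. for simple
   instruction emulation something along the lines of
       int rc = vmxHCImportGuestState<  CPUMCTX_EXTRN_RIP
                                      | CPUMCTX_EXTRN_RFLAGS
                                      | CPUMCTX_EXTRN_CS>(pVCpu, pVmcsInfo, __FUNCTION__);
   Actual callers pass the exact CPUMCTX_EXTRN_XXX mask they require and, where
   applicable, the a_fDoneLocal/a_fDonePostExit template arguments. */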
4332
4333/**
4334 * Check per-VM and per-VCPU force flag actions that require us to go back to
4335 * ring-3 for one reason or another.
4336 *
4337 * @returns Strict VBox status code (i.e. informational status codes too)
4338 * @retval VINF_SUCCESS if we don't have any actions that require going back to
4339 * ring-3.
4340 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
4341 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
4342 * interrupts)
4343 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
4344 * all EMTs to be in ring-3.
4345 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
4346 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
4347 * to the EM loop.
4348 *
4349 * @param pVCpu The cross context virtual CPU structure.
4350 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
4351 * @param fStepping Whether we are single-stepping the guest using the
4352 * hypervisor debugger.
4353 *
4354 * @remarks This might cause nested-guest VM-exits; the caller must check if the guest
4355 * is no longer in VMX non-root mode.
4356 */
4357static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
4358{
4359#ifndef IN_NEM_DARWIN
4360 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4361#endif
4362
4363 /*
4364 * Update pending interrupts into the APIC's IRR.
4365 */
4366 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4367 APICUpdatePendingInterrupts(pVCpu);
4368
4369 /*
4370 * Anything pending? Should be more likely than not if we're doing a good job.
4371 */
4372 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4373 if ( !fStepping
4374 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
4375 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
4376 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
4377 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4378 return VINF_SUCCESS;
4379
4380 /* Pending PGM CR3 sync. */
4381 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
4382 {
4383 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4384 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
4385 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
4386 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
4387 if (rcStrict != VINF_SUCCESS)
4388 {
4389 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
4390 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
4391 return rcStrict;
4392 }
4393 }
4394
4395 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
4396 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
4397 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4398 {
4399 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
4400 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
4401 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
4402 rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
4403 return rc;
4404 }
4405
4406 /* Pending VM request packets, such as hardware interrupts. */
4407 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
4408 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
4409 {
4410 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
4411 Log4Func(("Pending VM request forcing us back to ring-3\n"));
4412 return VINF_EM_PENDING_REQUEST;
4413 }
4414
4415 /* Pending PGM pool flushes. */
4416 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
4417 {
4418 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
4419 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
4420 return VINF_PGM_POOL_FLUSH_PENDING;
4421 }
4422
4423 /* Pending DMA requests. */
4424 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4425 {
4426 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
4427 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4428 return VINF_EM_RAW_TO_R3;
4429 }
4430
4431#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4432 /*
4433 * Pending nested-guest events.
4434 *
4435     * Please note that the priority of these events is specified and important.
4436 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4437 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
4438 */
4439 if (fIsNestedGuest)
4440 {
4441 /* Pending nested-guest APIC-write. */
4442 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4443 {
4444 Log4Func(("Pending nested-guest APIC-write\n"));
4445 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
4446 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4447 return rcStrict;
4448 }
4449
4450 /* Pending nested-guest monitor-trap flag (MTF). */
4451 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
4452 {
4453 Log4Func(("Pending nested-guest MTF\n"));
4454 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
4455 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4456 return rcStrict;
4457 }
4458
4459 /* Pending nested-guest VMX-preemption timer expired. */
4460 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
4461 {
4462 Log4Func(("Pending nested-guest preempt timer\n"));
4463 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
4464 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4465 return rcStrict;
4466 }
4467 }
4468#else
4469 NOREF(fIsNestedGuest);
4470#endif
4471
4472 return VINF_SUCCESS;
4473}
4474
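/*
 * Illustrative caller sketch for vmxHCCheckForceFlags() above (simplified and hypothetical,
 * not part of the build).  The two 'false' arguments stand for fIsNestedGuest and fStepping;
 * the pre-run loop typically bails out to ring-3 / the EM loop on anything other than
 * VINF_SUCCESS:
 *
 *     VBOXSTRICTRC rcStrict = vmxHCCheckForceFlags(pVCpu, false, false);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */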
4475
4476/**
4477 * Converts any TRPM trap into a pending HM event. This is typically used when
4478 * entering from ring-3 (not longjmp returns).
4479 *
4480 * @param pVCpu The cross context virtual CPU structure.
4481 */
4482static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
4483{
4484 Assert(TRPMHasTrap(pVCpu));
4485 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4486
4487 uint8_t uVector;
4488 TRPMEVENT enmTrpmEvent;
4489 uint32_t uErrCode;
4490 RTGCUINTPTR GCPtrFaultAddress;
4491 uint8_t cbInstr;
4492 bool fIcebp;
4493
4494 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4495 AssertRC(rc);
4496
4497 uint32_t u32IntInfo;
4498 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4499 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4500
4501 rc = TRPMResetTrap(pVCpu);
4502 AssertRC(rc);
4503 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4504 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4505
4506 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4507}
4508
4509
4510/**
4511 * Converts the pending HM event into a TRPM trap.
4512 *
4513 * @param pVCpu The cross context virtual CPU structure.
4514 */
4515static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4516{
4517 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4518
4519 /* If a trap was already pending, we did something wrong! */
4520 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4521
4522 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4523 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4524 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4525
4526 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4527
4528 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4529 AssertRC(rc);
4530
4531 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4532 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4533
4534 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4535 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4536 else
4537 {
4538 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4539 switch (uVectorType)
4540 {
4541 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4542 TRPMSetTrapDueToIcebp(pVCpu);
4543 RT_FALL_THRU();
4544 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4545 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4546 {
4547 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4548 || ( uVector == X86_XCPT_BP /* INT3 */
4549 || uVector == X86_XCPT_OF /* INTO */
4550 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4551 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4552 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4553 break;
4554 }
4555 }
4556 }
4557
4558 /* We're now done converting the pending event. */
4559 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4560}
4561
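/*
 * Illustrative sketch of the TRPM <-> HM event round trip handled by the two helpers above
 * (simplified and hypothetical, not part of the build).  A TRPM trap queued in ring-3 is
 * converted to a pending HM event on the way in, and an undelivered HM event is handed
 * back to TRPM before returning to ring-3:
 *
 *     if (TRPMHasTrap(pVCpu))
 *         vmxHCTrpmTrapToPendingEvent(pVCpu);
 *     // ... run guest code ...
 *     if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
 *         vmxHCPendingEventToTrpmTrap(pVCpu);
 */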
4562
4563/**
4564 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4565 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4566 *
4567 * @param pVCpu The cross context virtual CPU structure.
4568 * @param pVmcsInfo The VMCS info. object.
4569 */
4570static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4571{
4572 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4573 {
4574 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4575 {
4576 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4577 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4578 AssertRC(rc);
4579 }
4580    } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4581}
4582
4583
4584/**
4585 * Clears the interrupt-window exiting control in the VMCS.
4586 *
4587 * @param pVCpu The cross context virtual CPU structure.
4588 * @param pVmcsInfo The VMCS info. object.
4589 */
4590DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4591{
4592 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4593 {
4594 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4595 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4596 AssertRC(rc);
4597 }
4598}
4599
4600
4601/**
4602 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4603 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4604 *
4605 * @param pVCpu The cross context virtual CPU structure.
4606 * @param pVmcsInfo The VMCS info. object.
4607 */
4608static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4609{
4610 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4611 {
4612 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4613 {
4614 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4615 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4616 AssertRC(rc);
4617 Log4Func(("Setup NMI-window exiting\n"));
4618 }
4619 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4620}
4621
4622
4623/**
4624 * Clears the NMI-window exiting control in the VMCS.
4625 *
4626 * @param pVCpu The cross context virtual CPU structure.
4627 * @param pVmcsInfo The VMCS info. object.
4628 */
4629DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4630{
4631 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4632 {
4633 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4634 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4635 AssertRC(rc);
4636 }
4637}
4638
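/*
 * Usage sketch for the interrupt-/NMI-window helpers above (illustrative and simplified,
 * not part of the build).  When an event is pending but cannot be delivered yet, the
 * corresponding window-exiting control is set so VT-x exits as soon as the guest can
 * accept it; the matching VM-exit handler then clears the control again:
 *
 *     if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
 *         vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
 *     // ... later, in the interrupt-window VM-exit handler ...
 *     vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
 */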
4639
4640/**
4641 * Injects an event into the guest upon VM-entry by updating the relevant fields
4642 * in the VM-entry area in the VMCS.
4643 *
4644 * @returns Strict VBox status code (i.e. informational status codes too).
4645 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4646 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4647 *
4648 * @param pVCpu The cross context virtual CPU structure.
4649 * @param pVmcsInfo The VMCS info object.
4650 * @param   fIsNestedGuest  Flag whether this is for a pending nested-guest event.
4651 * @param pEvent The event being injected.
4652 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4653 *                          will be updated if necessary. This cannot be NULL.
4654 * @param fStepping Whether we're single-stepping guest execution and should
4655 * return VINF_EM_DBG_STEPPED if the event is injected
4656 * directly (registers modified by us, not by hardware on
4657 * VM-entry).
4658 */
4659static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4660 bool fStepping, uint32_t *pfIntrState)
4661{
4662 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4663 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4664 Assert(pfIntrState);
4665
4666#ifdef IN_NEM_DARWIN
4667 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4668#endif
4669
4670 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4671 uint32_t u32IntInfo = pEvent->u64IntInfo;
4672 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4673 uint32_t const cbInstr = pEvent->cbInstr;
4674 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4675 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4676 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4677
4678#ifdef VBOX_STRICT
4679 /*
4680 * Validate the error-code-valid bit for hardware exceptions.
4681 * No error codes for exceptions in real-mode.
4682 *
4683 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4684 */
4685 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4686 && !CPUMIsGuestInRealModeEx(pCtx))
4687 {
4688 switch (uVector)
4689 {
4690 case X86_XCPT_PF:
4691 case X86_XCPT_DF:
4692 case X86_XCPT_TS:
4693 case X86_XCPT_NP:
4694 case X86_XCPT_SS:
4695 case X86_XCPT_GP:
4696 case X86_XCPT_AC:
4697 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4698 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4699 RT_FALL_THRU();
4700 default:
4701 break;
4702 }
4703 }
4704
4705 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4706 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4707 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4708#endif
4709
4710 RT_NOREF(uVector);
4711 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4712 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4713 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4714 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4715 {
4716 Assert(uVector <= X86_XCPT_LAST);
4717 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4718 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4719 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4720 }
4721 else
4722 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4723
4724 /*
4725 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4726 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4727 * interrupt handler in the (real-mode) guest.
4728 *
4729 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4730 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4731 */
4732 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4733 {
4734#ifndef IN_NEM_DARWIN
4735 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4736#endif
4737 {
4738 /*
4739 * For CPUs with unrestricted guest execution enabled and with the guest
4740 * in real-mode, we must not set the deliver-error-code bit.
4741 *
4742 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4743 */
4744 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4745 }
4746#ifndef IN_NEM_DARWIN
4747 else
4748 {
4749 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4750 Assert(PDMVmmDevHeapIsEnabled(pVM));
4751 Assert(pVM->hm.s.vmx.pRealModeTSS);
4752 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4753
4754 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4755 int rc2 = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4756 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4757 AssertRCReturn(rc2, rc2);
4758
4759 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4760 size_t const cbIdtEntry = sizeof(X86IDTR16);
4761 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4762 {
4763 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4764 if (uVector == X86_XCPT_DF)
4765 return VINF_EM_RESET;
4766
4767 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4768 No error codes for exceptions in real-mode. */
4769 if (uVector == X86_XCPT_GP)
4770 {
4771 static HMEVENT const s_EventXcptDf
4772 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4773 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4774 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4775 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4776 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
4777 }
4778
4779 /*
4780 * If we're injecting an event with no valid IDT entry, inject a #GP.
4781 * No error codes for exceptions in real-mode.
4782 *
4783 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4784 */
4785 static HMEVENT const s_EventXcptGp
4786 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4787 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4788 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4789 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4790 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
4791 }
4792
4793 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4794 uint16_t uGuestIp = pCtx->ip;
4795 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4796 {
4797 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4798                /* #BP and #OF are both benign traps; we need to resume at the next instruction. */
4799 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4800 }
4801 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4802 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4803
4804 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4805 X86IDTR16 IdtEntry;
4806 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4807 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4808 AssertRCReturn(rc2, rc2);
4809
4810 /* Construct the stack frame for the interrupt/exception handler. */
4811 VBOXSTRICTRC rcStrict;
4812 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, (uint16_t)pCtx->eflags.u);
4813 if (rcStrict == VINF_SUCCESS)
4814 {
4815 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4816 if (rcStrict == VINF_SUCCESS)
4817 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4818 }
4819
4820 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4821 if (rcStrict == VINF_SUCCESS)
4822 {
4823 pCtx->eflags.u &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4824 pCtx->rip = IdtEntry.offSel;
4825 pCtx->cs.Sel = IdtEntry.uSel;
4826 pCtx->cs.ValidSel = IdtEntry.uSel;
4827 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4828 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4829 && uVector == X86_XCPT_PF)
4830 pCtx->cr2 = GCPtrFault;
4831
4832 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4833 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4834 | HM_CHANGED_GUEST_RSP);
4835
4836 /*
4837 * If we delivered a hardware exception (other than an NMI) and if there was
4838 * block-by-STI in effect, we should clear it.
4839 */
4840 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4841 {
4842 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4843 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4844 Log4Func(("Clearing inhibition due to STI\n"));
4845 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4846 }
4847
4848 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4849 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4850
4851 /*
4852 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4853 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4854 */
4855 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4856
4857 /*
4858 * If we eventually support nested-guest execution without unrestricted guest execution,
4859 * we should set fInterceptEvents here.
4860 */
4861 Assert(!fIsNestedGuest);
4862
4863 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4864 if (fStepping)
4865 rcStrict = VINF_EM_DBG_STEPPED;
4866 }
4867 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4868 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4869 return rcStrict;
4870 }
4871#else
4872 RT_NOREF(pVmcsInfo);
4873#endif
4874 }
4875
4876 /*
4877 * Validate.
4878 */
4879 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4880 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4881
4882 /*
4883 * Inject the event into the VMCS.
4884 */
4885 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4886 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4887 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4888 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4889 AssertRC(rc);
4890
4891 /*
4892 * Update guest CR2 if this is a page-fault.
4893 */
4894 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4895 pCtx->cr2 = GCPtrFault;
4896
4897 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4898 return VINF_SUCCESS;
4899}
4900
4901
4902/**
4903 * Evaluates the event to be delivered to the guest and sets it as the pending
4904 * event.
4905 *
4906 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4907 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4908 * NOT restore these force-flags.
4909 *
4910 * @returns Strict VBox status code (i.e. informational status codes too).
4911 * @param pVCpu The cross context virtual CPU structure.
4912 * @param pVmcsInfo The VMCS information structure.
4913 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
4914 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4915 */
4916static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4917{
4918 Assert(pfIntrState);
4919 Assert(!TRPMHasTrap(pVCpu));
4920
4921 /*
4922 * Compute/update guest-interruptibility state related FFs.
4923 * The FFs will be used below while evaluating events to be injected.
4924 */
4925 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4926
4927 /*
4928 * Evaluate if a new event needs to be injected.
4929 * An event that's already pending has already performed all necessary checks.
4930 */
4931 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4932 && !CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
4933 {
4934 /** @todo SMI. SMIs take priority over NMIs. */
4935
4936 /*
4937 * NMIs.
4938 * NMIs take priority over external interrupts.
4939 */
4940#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4941 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4942#endif
4943 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4944 {
4945 /*
4946 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4947 *
4948 * For a nested-guest, the FF always indicates the outer guest's ability to
4949 * receive an NMI while the guest-interruptibility state bit depends on whether
4950 * the nested-hypervisor is using virtual-NMIs.
4951 */
4952 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
4953 {
4954#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4955 if ( fIsNestedGuest
4956 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4957 return IEMExecVmxVmexitXcptNmi(pVCpu);
4958#endif
4959 vmxHCSetPendingXcptNmi(pVCpu);
4960 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4961 Log4Func(("NMI pending injection\n"));
4962
4963 /* We've injected the NMI, bail. */
4964 return VINF_SUCCESS;
4965 }
4966 if (!fIsNestedGuest)
4967 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4968 }
4969
4970 /*
4971 * External interrupts (PIC/APIC).
4972 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4973 * We cannot re-request the interrupt from the controller again.
4974 */
4975 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4976 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4977 {
4978 Assert(!DBGFIsStepping(pVCpu));
4979 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4980 AssertRC(rc);
4981
4982 /*
4983 * We must not check EFLAGS directly when executing a nested-guest, use
4984 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
4985 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4986 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4987 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4988 *
4989 * See Intel spec. 25.4.1 "Event Blocking".
4990 */
4991 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4992 {
4993#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4994 if ( fIsNestedGuest
4995 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4996 {
4997 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4998 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4999 return rcStrict;
5000 }
5001#endif
5002 uint8_t u8Interrupt;
5003 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
5004 if (RT_SUCCESS(rc))
5005 {
5006#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5007 if ( fIsNestedGuest
5008 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
5009 {
5010 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
5011 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5012 return rcStrict;
5013 }
5014#endif
5015 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
5016 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
5017 }
5018 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
5019 {
5020 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
5021
5022 if ( !fIsNestedGuest
5023 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
5024 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
5025 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
5026
5027 /*
5028 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
5029 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
5030 * need to re-set this force-flag here.
5031 */
5032 }
5033 else
5034 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
5035
5036 /* We've injected the interrupt or taken necessary action, bail. */
5037 return VINF_SUCCESS;
5038 }
5039 if (!fIsNestedGuest)
5040 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
5041 }
5042 }
5043 else if (!fIsNestedGuest)
5044 {
5045 /*
5046 * An event is being injected or we are in an interrupt shadow. Check if another event is
5047 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
5048 * the pending event.
5049 */
5050 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
5051 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
5052 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
5053 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5054 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
5055 }
5056 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
5057
5058 return VINF_SUCCESS;
5059}
5060
5061
5062/**
5063 * Injects any pending events into the guest if the guest is in a state to
5064 * receive them.
5065 *
5066 * @returns Strict VBox status code (i.e. informational status codes too).
5067 * @param pVCpu The cross context virtual CPU structure.
5068 * @param pVmcsInfo The VMCS information structure.
5069 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
5070 * @param fIntrState The VT-x guest-interruptibility state.
5071 * @param fStepping Whether we are single-stepping the guest using the
5072 * hypervisor debugger and should return
5073 * VINF_EM_DBG_STEPPED if the event was dispatched
5074 * directly.
5075 */
5076static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
5077 uint32_t fIntrState, bool fStepping)
5078{
5079 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
5080#ifndef IN_NEM_DARWIN
5081 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5082#endif
5083
5084#ifdef VBOX_STRICT
5085 /*
5086 * Verify guest-interruptibility state.
5087 *
5088 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
5089 * since injecting an event may modify the interruptibility state and we must thus always
5090 * use fIntrState.
5091 */
5092 {
5093 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
5094 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
5095 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
5096 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5097 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
5098 Assert(!TRPMHasTrap(pVCpu));
5099 NOREF(fBlockMovSS); NOREF(fBlockSti);
5100 }
5101#endif
5102
5103 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5104 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5105 {
5106 /*
5107 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
5108 * pending even while injecting an event and in this case, we want a VM-exit as soon as
5109 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
5110 *
5111 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5112 */
5113 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
5114#ifdef VBOX_STRICT
5115 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5116 {
5117 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
5118 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5119 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5120 }
5121 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5122 {
5123 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
5124 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5125 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5126 }
5127#endif
5128 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5129 uIntType));
5130
5131 /*
5132 * Inject the event and get any changes to the guest-interruptibility state.
5133 *
5134 * The guest-interruptibility state may need to be updated if we inject the event
5135 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
5136 */
5137 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
5138 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
5139
5140 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5141 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
5142 else
5143 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
5144 }
5145
5146 /*
5147 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
5148     * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
5149 */
5150 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5151 && !fIsNestedGuest)
5152 {
5153 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
5154
5155 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5156 {
5157 /*
5158 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
5159 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
5160 */
5161 Assert(!DBGFIsStepping(pVCpu));
5162 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_TF);
5163 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
5164 fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
5165 AssertRC(rc);
5166 }
5167 else
5168 {
5169 /*
5170 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
5171 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
5172             * we take care of this case in vmxHCExportSharedDebugState, as well as the case
5173             * where we use MTF, so just make sure it's called before executing guest code.
5174 */
5175 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
5176 }
5177 }
5178    /* else: for nested-guests this is currently handled while merging VMCS controls. */
5179
5180 /*
5181 * Finally, update the guest-interruptibility state.
5182 *
5183 * This is required for the real-on-v86 software interrupt injection, for
5184 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
5185 */
5186 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
5187 AssertRC(rc);
5188
5189 /*
5190 * There's no need to clear the VM-entry interruption-information field here if we're not
5191 * injecting anything. VT-x clears the valid bit on every VM-exit.
5192 *
5193 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
5194 */
5195
5196 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
5197 return rcStrict;
5198}
5199
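/*
 * Illustrative pairing of the two functions above as used on the pre-run path (simplified,
 * hypothetical, error handling and the nested-guest/stepping cases omitted).  Events are
 * first evaluated and marked pending, then injected if the guest-interruptibility state
 * permits:
 *
 *     uint32_t fIntrState = 0;
 *     VBOXSTRICTRC rcStrict = vmxHCEvaluatePendingEvent(pVCpu, pVmcsInfo, false, &fIntrState);
 *     if (rcStrict == VINF_SUCCESS)
 *         rcStrict = vmxHCInjectPendingEvent(pVCpu, pVmcsInfo, false, fIntrState, false);
 */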
5200
5201/**
5202 * Tries to determine which part of the guest state VT-x has deemed invalid
5203 * and updates the error record fields accordingly.
5204 *
5205 * @returns VMX_IGS_* error codes.
5206 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
5207 * wrong with the guest state.
5208 *
5209 * @param pVCpu The cross context virtual CPU structure.
5210 * @param pVmcsInfo The VMCS info. object.
5211 *
5212 * @remarks This function assumes our cache of the VMCS controls
5213 *          is valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
5214 */
5215static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
5216{
5217#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
5218#define HMVMX_CHECK_BREAK(expr, err) do { \
5219 if (!(expr)) { uError = (err); break; } \
5220 } while (0)
5221
5222 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5223 uint32_t uError = VMX_IGS_ERROR;
5224 uint32_t u32IntrState = 0;
5225#ifndef IN_NEM_DARWIN
5226 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5227 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
5228#else
5229 bool const fUnrestrictedGuest = true;
5230#endif
5231 do
5232 {
5233 int rc;
5234
5235 /*
5236 * Guest-interruptibility state.
5237 *
5238         * Read this first so that any check that fails prior to those that actually
5239         * require the guest-interruptibility state still reflects the correct VMCS
5240         * value, avoiding further confusion.
5241 */
5242 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
5243 AssertRC(rc);
5244
5245 uint32_t u32Val;
5246 uint64_t u64Val;
5247
5248 /*
5249 * CR0.
5250 */
5251 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5252 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
5253 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
5254 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
5255 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
5256 if (fUnrestrictedGuest)
5257 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
5258
5259 uint64_t u64GuestCr0;
5260 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
5261 AssertRC(rc);
5262 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
5263 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
5264 if ( !fUnrestrictedGuest
5265 && (u64GuestCr0 & X86_CR0_PG)
5266 && !(u64GuestCr0 & X86_CR0_PE))
5267 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
5268
5269 /*
5270 * CR4.
5271 */
5272 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5273 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
5274 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
5275
5276 uint64_t u64GuestCr4;
5277 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
5278 AssertRC(rc);
5279 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
5280 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
5281
5282 /*
5283 * IA32_DEBUGCTL MSR.
5284 */
5285 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
5286 AssertRC(rc);
5287 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5288 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
5289 {
5290 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
5291 }
5292 uint64_t u64DebugCtlMsr = u64Val;
5293
5294#ifdef VBOX_STRICT
5295 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
5296 AssertRC(rc);
5297 Assert(u32Val == pVmcsInfo->u32EntryCtls);
5298#endif
5299 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5300
5301 /*
5302 * RIP and RFLAGS.
5303 */
5304 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
5305 AssertRC(rc);
5306        /* pCtx->rip can be different from the one in the VMCS (e.g. after running guest code and taking VM-exits that don't update it). */
5307 if ( !fLongModeGuest
5308 || !pCtx->cs.Attr.n.u1Long)
5309 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
5310 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
5311 * must be identical if the "IA-32e mode guest" VM-entry
5312 * control is 1 and CS.L is 1. No check applies if the
5313 * CPU supports 64 linear-address bits. */
5314
5315 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
5316 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
5317 AssertRC(rc);
5318        HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bits 63:22, bits 15, 5, 3 MBZ. */
5319 VMX_IGS_RFLAGS_RESERVED);
5320 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
5321 uint32_t const u32Eflags = u64Val;
5322
5323 if ( fLongModeGuest
5324 || ( fUnrestrictedGuest
5325 && !(u64GuestCr0 & X86_CR0_PE)))
5326 {
5327 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
5328 }
5329
5330 uint32_t u32EntryInfo;
5331 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
5332 AssertRC(rc);
5333 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5334 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
5335
5336 /*
5337 * 64-bit checks.
5338 */
5339 if (fLongModeGuest)
5340 {
5341 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
5342 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
5343 }
5344
5345 if ( !fLongModeGuest
5346 && (u64GuestCr4 & X86_CR4_PCIDE))
5347 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
5348
5349 /** @todo CR3 field must be such that bits 63:52 and bits in the range
5350 * 51:32 beyond the processor's physical-address width are 0. */
5351
5352 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5353 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
5354 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
5355
5356#ifndef IN_NEM_DARWIN
5357 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
5358 AssertRC(rc);
5359 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
5360
5361 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
5362 AssertRC(rc);
5363 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
5364#endif
5365
5366 /*
5367 * PERF_GLOBAL MSR.
5368 */
5369 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
5370 {
5371 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
5372 AssertRC(rc);
5373 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
5374 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
5375 }
5376
5377 /*
5378 * PAT MSR.
5379 */
5380 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5381 {
5382 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
5383 AssertRC(rc);
5384 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
5385 for (unsigned i = 0; i < 8; i++)
5386 {
5387 uint8_t u8Val = (u64Val & 0xff);
5388 if ( u8Val != 0 /* UC */
5389 && u8Val != 1 /* WC */
5390 && u8Val != 4 /* WT */
5391 && u8Val != 5 /* WP */
5392 && u8Val != 6 /* WB */
5393 && u8Val != 7 /* UC- */)
5394 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
5395 u64Val >>= 8;
5396 }
5397 }
5398
5399 /*
5400 * EFER MSR.
5401 */
5402 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5403 {
5404 Assert(g_fHmVmxSupportsVmcsEfer);
5405 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
5406 AssertRC(rc);
5407 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
5408 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
5409 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
5410 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
5411 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
5412 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
5413 * iemVmxVmentryCheckGuestState(). */
5414 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5415 || !(u64GuestCr0 & X86_CR0_PG)
5416 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
5417 VMX_IGS_EFER_LMA_LME_MISMATCH);
5418 }
5419
5420 /*
5421 * Segment registers.
5422 */
5423 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5424 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
5425 if (!(u32Eflags & X86_EFL_VM))
5426 {
5427 /* CS */
5428 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
5429 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
5430 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
5431 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5432 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5433 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
5434 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5435 /* CS cannot be loaded with NULL in protected mode. */
5436 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
5437 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
5438 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5439 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
5440 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5441 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
5442 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
5443 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
5444 else
5445 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
5446
5447 /* SS */
5448 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5449 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
5450 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
5451 if ( !(pCtx->cr0 & X86_CR0_PE)
5452 || pCtx->cs.Attr.n.u4Type == 3)
5453 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
5454
5455 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5456 {
5457 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
5458 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
5459 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
5460 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
5461 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5462 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5463 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
5464 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5465 }
5466
5467 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
5468 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5469 {
5470 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
5471 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
5472 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5473 || pCtx->ds.Attr.n.u4Type > 11
5474 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5475 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
5476 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
5477 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5478 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5479 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
5480 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5481 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5482 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
5483 }
5484 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5485 {
5486 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
5487 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
5488 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5489 || pCtx->es.Attr.n.u4Type > 11
5490 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5491 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5492 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5493 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5494 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5495 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5496 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5497 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5498 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5499 }
5500 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5501 {
5502 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5503 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5504 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5505 || pCtx->fs.Attr.n.u4Type > 11
5506 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5507 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5508 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5509 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5510 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5511 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5512 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5513 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5514 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5515 }
5516 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5517 {
5518 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5519 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5520 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5521 || pCtx->gs.Attr.n.u4Type > 11
5522 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5523 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5524 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5525 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5526 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5527 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5528 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5529 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5530 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5531 }
5532 /* 64-bit capable CPUs. */
5533 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5534 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5535 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5536 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5537 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5538 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5539 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5540 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5541 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5542 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5543 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5544 }
5545 else
5546 {
5547 /* V86 mode checks. */
5548 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5549 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5550 {
5551 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5552 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5553 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5554 }
5555 else
5556 {
5557 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5558 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5559 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5560 }
5561
5562 /* CS */
5563 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5564 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5565 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5566 /* SS */
5567 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5568 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5569 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5570 /* DS */
5571 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5572 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5573 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5574 /* ES */
5575 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5576 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5577 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5578 /* FS */
5579 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5580 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5581 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5582 /* GS */
5583 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5584 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5585 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5586 /* 64-bit capable CPUs. */
5587 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5588 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5589 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5590 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5591 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5592 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5593 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5594 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5595 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5596 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5597 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5598 }
5599
5600 /*
5601 * TR.
5602 */
5603 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5604 /* 64-bit capable CPUs. */
5605 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5606 if (fLongModeGuest)
5607 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5608 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5609 else
5610 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5611 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5612 VMX_IGS_TR_ATTR_TYPE_INVALID);
5613 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5614 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5615 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5616 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5617 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5618 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5619 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5620 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5621
5622 /*
5623 * GDTR and IDTR (64-bit capable checks).
5624 */
5625 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5626 AssertRC(rc);
5627 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5628
5629 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5630 AssertRC(rc);
5631 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5632
5633 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5634 AssertRC(rc);
5635 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5636
5637 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5638 AssertRC(rc);
5639 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5640
5641 /*
5642 * Guest Non-Register State.
5643 */
5644 /* Activity State. */
5645 uint32_t u32ActivityState;
5646 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5647 AssertRC(rc);
5648 HMVMX_CHECK_BREAK( !u32ActivityState
5649 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5650 VMX_IGS_ACTIVITY_STATE_INVALID);
5651 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5652 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5653
5654 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5655 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5656 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5657
5658 /** @todo Activity state and injecting interrupts. Left as a todo since we
5659 * currently don't use activity states but ACTIVE. */
5660
5661 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5662 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5663
5664 /* Guest interruptibility-state. */
5665 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5666 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5667 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5668 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5669 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5670 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5671 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5672 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5673 {
5674 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5675 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5676 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5677 }
5678 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5679 {
5680 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5681 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5682 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5683 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5684 }
5685 /** @todo Assumes the processor is not in SMM. */
5686 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5687 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5688 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5689 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5690 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5691 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5692 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5693 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5694
5695 /* Pending debug exceptions. */
5696 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5697 AssertRC(rc);
5698 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5699 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5700 u32Val = u64Val; /* For pending debug exceptions checks below. */
5701
5702 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5703 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5704 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5705 {
5706 if ( (u32Eflags & X86_EFL_TF)
5707 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5708 {
5709 /* Bit 14 is PendingDebug.BS. */
5710 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5711 }
5712 if ( !(u32Eflags & X86_EFL_TF)
5713 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5714 {
5715 /* Bit 14 is PendingDebug.BS. */
5716 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5717 }
5718 }
5719
5720#ifndef IN_NEM_DARWIN
5721 /* VMCS link pointer. */
5722 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5723 AssertRC(rc);
5724 if (u64Val != UINT64_C(0xffffffffffffffff))
5725 {
5726 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5727 /** @todo Bits beyond the processor's physical-address width MBZ. */
5728 /** @todo SMM checks. */
5729 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5730 Assert(pVmcsInfo->pvShadowVmcs);
5731 VMXVMCSREVID VmcsRevId;
5732 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5733 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5734 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5735 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5736 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5737 }
5738
5739 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5740 * not using nested paging? */
5741 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5742 && !fLongModeGuest
5743 && CPUMIsGuestInPAEModeEx(pCtx))
5744 {
5745 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5746 AssertRC(rc);
5747 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5748
5749 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5750 AssertRC(rc);
5751 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5752
5753 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5754 AssertRC(rc);
5755 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5756
5757 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5758 AssertRC(rc);
5759 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5760 }
5761#endif
5762
5763 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5764 if (uError == VMX_IGS_ERROR)
5765 uError = VMX_IGS_REASON_NOT_FOUND;
5766 } while (0);
5767
5768 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5769 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5770 return uError;
5771
5772#undef HMVMX_ERROR_BREAK
5773#undef HMVMX_CHECK_BREAK
5774}
5775
5776
5777#ifndef HMVMX_USE_FUNCTION_TABLE
5778/**
5779 * Handles a guest VM-exit from hardware-assisted VMX execution.
5780 *
5781 * @returns Strict VBox status code (i.e. informational status codes too).
5782 * @param pVCpu The cross context virtual CPU structure.
5783 * @param pVmxTransient The VMX-transient structure.
5784 */
5785DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5786{
5787#ifdef DEBUG_ramshankar
5788# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5789 do { \
5790 if (a_fSave != 0) \
5791 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5792 VBOXSTRICTRC rcStrict = a_CallExpr; \
5793 if (a_fSave != 0) \
5794 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5795 return rcStrict; \
5796 } while (0)
5797#else
5798# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5799#endif
5800 uint32_t const uExitReason = pVmxTransient->uExitReason;
5801 switch (uExitReason)
5802 {
5803 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5804 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5805 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5806 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5807 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5808 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5809 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5810 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5811 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5812 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5813 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5814 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5815 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5816 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5817 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5818 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5819 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5820 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5821 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5822 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5823 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5824 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5825 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5826 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5827 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5828 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5829 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5830 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5831 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5832 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5833#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5834 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5835 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5836 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5837 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5838 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5839 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5840 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5841 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5842 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5843 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5844#else
5845 case VMX_EXIT_VMCLEAR:
5846 case VMX_EXIT_VMLAUNCH:
5847 case VMX_EXIT_VMPTRLD:
5848 case VMX_EXIT_VMPTRST:
5849 case VMX_EXIT_VMREAD:
5850 case VMX_EXIT_VMRESUME:
5851 case VMX_EXIT_VMWRITE:
5852 case VMX_EXIT_VMXOFF:
5853 case VMX_EXIT_VMXON:
5854 case VMX_EXIT_INVVPID:
5855 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5856#endif
5857#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5858 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5859#else
5860 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5861#endif
5862
5863 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5864 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5865 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5866
5867 case VMX_EXIT_INIT_SIGNAL:
5868 case VMX_EXIT_SIPI:
5869 case VMX_EXIT_IO_SMI:
5870 case VMX_EXIT_SMI:
5871 case VMX_EXIT_ERR_MSR_LOAD:
5872 case VMX_EXIT_ERR_MACHINE_CHECK:
5873 case VMX_EXIT_PML_FULL:
5874 case VMX_EXIT_VIRTUALIZED_EOI:
5875 case VMX_EXIT_GDTR_IDTR_ACCESS:
5876 case VMX_EXIT_LDTR_TR_ACCESS:
5877 case VMX_EXIT_APIC_WRITE:
5878 case VMX_EXIT_RDRAND:
5879 case VMX_EXIT_RSM:
5880 case VMX_EXIT_VMFUNC:
5881 case VMX_EXIT_ENCLS:
5882 case VMX_EXIT_RDSEED:
5883 case VMX_EXIT_XSAVES:
5884 case VMX_EXIT_XRSTORS:
5885 case VMX_EXIT_UMWAIT:
5886 case VMX_EXIT_TPAUSE:
5887 case VMX_EXIT_LOADIWKEY:
5888 default:
5889 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5890 }
5891#undef VMEXIT_CALL_RET
5892}
5893#endif /* !HMVMX_USE_FUNCTION_TABLE */
5894
5895
5896#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5897/**
5898 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5899 *
5900 * @returns Strict VBox status code (i.e. informational status codes too).
5901 * @param pVCpu The cross context virtual CPU structure.
5902 * @param pVmxTransient The VMX-transient structure.
5903 */
5904DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5905{
5906 uint32_t const uExitReason = pVmxTransient->uExitReason;
5907 switch (uExitReason)
5908 {
5909# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5910 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient);
5911 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolationNested(pVCpu, pVmxTransient);
5912# else
5913 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
5914 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
5915# endif
5916 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5917 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5918 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5919
5920 /*
5921 * We shouldn't direct host physical interrupts to the nested-guest.
5922 */
5923 case VMX_EXIT_EXT_INT:
5924 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5925
5926 /*
5927 * Instructions that cause VM-exits unconditionally or the condition is
5928 * always taken solely from the nested hypervisor (meaning if the VM-exit
5929 * happens, it's guaranteed to be a nested-guest VM-exit).
5930 *
5931 * - Provides VM-exit instruction length ONLY.
5932 */
5933 case VMX_EXIT_CPUID: /* Unconditional. */
5934 case VMX_EXIT_VMCALL:
5935 case VMX_EXIT_GETSEC:
5936 case VMX_EXIT_INVD:
5937 case VMX_EXIT_XSETBV:
5938 case VMX_EXIT_VMLAUNCH:
5939 case VMX_EXIT_VMRESUME:
5940 case VMX_EXIT_VMXOFF:
5941 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5942 case VMX_EXIT_VMFUNC:
5943 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5944
5945 /*
5946 * Instructions that cause VM-exits unconditionally or the condition is
5947 * always taken solely from the nested hypervisor (meaning if the VM-exit
5948 * happens, it's guaranteed to be a nested-guest VM-exit).
5949 *
5950 * - Provides VM-exit instruction length.
5951 * - Provides VM-exit information.
5952 * - Optionally provides Exit qualification.
5953 *
5954 * Since Exit qualification is 0 for all VM-exits where it is not
5955 * applicable, reading and passing it to the guest should produce
5956 * defined behavior.
5957 *
5958 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5959 */
5960 case VMX_EXIT_INVEPT: /* Unconditional. */
5961 case VMX_EXIT_INVVPID:
5962 case VMX_EXIT_VMCLEAR:
5963 case VMX_EXIT_VMPTRLD:
5964 case VMX_EXIT_VMPTRST:
5965 case VMX_EXIT_VMXON:
5966 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5967 case VMX_EXIT_LDTR_TR_ACCESS:
5968 case VMX_EXIT_RDRAND:
5969 case VMX_EXIT_RDSEED:
5970 case VMX_EXIT_XSAVES:
5971 case VMX_EXIT_XRSTORS:
5972 case VMX_EXIT_UMWAIT:
5973 case VMX_EXIT_TPAUSE:
5974 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5975
5976 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5977 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5978 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5979 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5980 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
5981 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
5982 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
5983 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
5984 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
5985 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
5986 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
5987 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
5988 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
5989 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
5990 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
5991 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
5992 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
5993 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
5994 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
5995
5996 case VMX_EXIT_PREEMPT_TIMER:
5997 {
5998 /** @todo NSTVMX: Preempt timer. */
5999 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
6000 }
6001
6002 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
6003 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
6004
6005 case VMX_EXIT_VMREAD:
6006 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
6007
6008 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
6009 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
6010
6011 case VMX_EXIT_INIT_SIGNAL:
6012 case VMX_EXIT_SIPI:
6013 case VMX_EXIT_IO_SMI:
6014 case VMX_EXIT_SMI:
6015 case VMX_EXIT_ERR_MSR_LOAD:
6016 case VMX_EXIT_ERR_MACHINE_CHECK:
6017 case VMX_EXIT_PML_FULL:
6018 case VMX_EXIT_RSM:
6019 default:
6020 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
6021 }
6022}
6023#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6024
6025
6026/** @name VM-exit helpers.
6027 * @{
6028 */
6029/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6030/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6031/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6032
6033/** Macro for VM-exits called unexpectedly. */
6034#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
6035 do { \
6036 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
6037 return VERR_VMX_UNEXPECTED_EXIT; \
6038 } while (0)
6039
6040#ifdef VBOX_STRICT
6041# ifndef IN_NEM_DARWIN
6042/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
6043# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
6044 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6045
6046# define HMVMX_ASSERT_PREEMPT_CPUID() \
6047 do { \
6048 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6049 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6050 } while (0)
6051
6052# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6053 do { \
6054 AssertPtr((a_pVCpu)); \
6055 AssertPtr((a_pVmxTransient)); \
6056 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6057 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6058 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6059 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6060 Assert((a_pVmxTransient)->pVmcsInfo); \
6061 Assert(ASMIntAreEnabled()); \
6062 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6063 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
6064 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6065 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6066 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
6067 HMVMX_ASSERT_PREEMPT_CPUID(); \
6068 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6069 } while (0)
6070# else
6071# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
6072# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
6073# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6074 do { \
6075 AssertPtr((a_pVCpu)); \
6076 AssertPtr((a_pVmxTransient)); \
6077 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6078 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6079 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6080 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6081 Assert((a_pVmxTransient)->pVmcsInfo); \
6082 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6083 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6084 } while (0)
6085# endif
6086
6087# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6088 do { \
6089 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
6090 Assert((a_pVmxTransient)->fIsNestedGuest); \
6091 } while (0)
6092
6093# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6094 do { \
6095 Log4Func(("\n")); \
6096 } while (0)
6097#else
6098# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6099 do { \
6100 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6101 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
6102 } while (0)
6103
6104# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6105 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
6106
6107# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
6108#endif
6109
6110#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6111/** Macro that performs the necessary privilege checks (setting any pending exception) for
6112 * VM-exits caused by a guest attempting to execute a VMX instruction. */
6113# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
6114 do \
6115 { \
6116 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
6117 if (rcStrictTmp == VINF_SUCCESS) \
6118 { /* likely */ } \
6119 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6120 { \
6121 Assert((a_pVCpu)->hm.s.Event.fPending); \
6122 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
6123 return VINF_SUCCESS; \
6124 } \
6125 else \
6126 { \
6127 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
6128 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
6129 } \
6130 } while (0)
6131
6132/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
6133# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
6134 do \
6135 { \
6136 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
6137 (a_pGCPtrEffAddr)); \
6138 if (rcStrictTmp == VINF_SUCCESS) \
6139 { /* likely */ } \
6140 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6141 { \
6142 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
6143 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
6144 NOREF(uXcptTmp); \
6145 return VINF_SUCCESS; \
6146 } \
6147 else \
6148 { \
6149 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
6150 return rcStrictTmp; \
6151 } \
6152 } while (0)
6153#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
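#if 0
/* Illustrative sketch only (never compiled): the rough shape in which the two macros above are
 * meant to be chained by a handler for a VM-exit caused by a VMX instruction with a memory
 * operand. The handler name and the exact VMX-transient fields used here are assumptions made
 * for illustration; the real handlers (vmxHCExitVmptrld and friends) appear later in this file. */
static VBOXSTRICTRC vmxHCExitVmxInstrSketch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    /* 1. Privilege/mode checks; if an exception is now pending we return VINF_SUCCESS so it gets injected. */
    HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);

    /* 2. Decode the memory operand; again, a pending exception makes the macro return VINF_SUCCESS. */
    RTGCPTR GCPtrOperand;
    HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual,
                             VMXMEMACCESS_READ, &GCPtrOperand);

    /* 3. Hand the decoded operand to IEM for the actual emulation / nested VM-exit processing. */
    RT_NOREF(GCPtrOperand);
    return VINF_SUCCESS;
}
#endif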
6154
6155
6156/**
6157 * Advances the guest RIP by the specified number of bytes.
6158 *
6159 * @param pVCpu The cross context virtual CPU structure.
6160 * @param cbInstr Number of bytes to advance the RIP by.
6161 *
6162 * @remarks No-long-jump zone!!!
6163 */
6164DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
6165{
6166 /* Advance the RIP. */
6167 pVCpu->cpum.GstCtx.rip += cbInstr;
6168 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
6169 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
6170 /** @todo clear RF? */
6171}
6172
6173
6174/**
6175 * Advances the guest RIP after reading it from the VMCS.
6176 *
6177 * @returns VBox status code, no informational status codes.
6178 * @param pVCpu The cross context virtual CPU structure.
6179 * @param pVmxTransient The VMX-transient structure.
6180 *
6181 * @remarks No-long-jump zone!!!
6182 */
6183static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6184{
6185 vmxHCReadToTransientSlow<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
6186 /** @todo consider template here after checking callers. */
6187 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
6188 AssertRCReturn(rc, rc);
6189
6190 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
6191 return VINF_SUCCESS;
6192}
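#if 0
/* Illustrative sketch only (never compiled): the "skip the instruction and continue" pattern that
 * trivial VM-exit handlers build on top of vmxHCAdvanceGuestRip(). The handler name is made up
 * for illustration; real users of the pattern appear further down in this file. */
static VBOXSTRICTRC vmxHCExitSkipInstrSketch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient); /* Reads the instruction length and moves RIP past it. */
    AssertRCReturn(rc, rc);
    return VINF_SUCCESS;
}
#endif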
6193
6194
6195/**
6196 * Handle a condition that occurred while delivering an event through the guest or
6197 * nested-guest IDT.
6198 *
6199 * @returns Strict VBox status code (i.e. informational status codes too).
6200 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6201 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
6202 * to continue execution of the guest which will deliver the \#DF.
6203 * @retval VINF_EM_RESET if we detected a triple-fault condition.
6204 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
6205 *
6206 * @param pVCpu The cross context virtual CPU structure.
6207 * @param pVmxTransient The VMX-transient structure.
6208 *
6209 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6210 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
6211 * is due to an EPT violation, PML full or SPP-related event.
6212 *
6213 * @remarks No-long-jump zone!!!
6214 */
6215static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6216{
6217 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6218 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6219 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6220 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6221 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6222 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
6223
6224 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
6225 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6226 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
6227 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
6228 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
6229 {
6230 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
6231 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
6232
6233 /*
6234 * If the event was a software interrupt (generated with INT n) or a software exception
6235 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
6236 * can handle the VM-exit and continue guest execution which will re-execute the
6237 * instruction rather than re-injecting the exception, as that can cause premature
6238 * trips to ring-3 before injection and involve TRPM which currently has no way of
6239 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
6240 * the problem).
6241 */
6242 IEMXCPTRAISE enmRaise;
6243 IEMXCPTRAISEINFO fRaiseInfo;
6244 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6245 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6246 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6247 {
6248 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
6249 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6250 }
6251 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
6252 {
6253 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
6254 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
6255 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
6256
6257 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
6258 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
6259
6260 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
6261
6262 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
6263 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
6264 {
6265 pVmxTransient->fVectoringPF = true;
6266 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6267 }
6268 }
6269 else
6270 {
6271 /*
6272 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
6273 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
6274 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
6275 */
6276 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6277 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6278 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
6279 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6280 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6281 }
6282
6283 /*
6284 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
6285 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
6286 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
6287 * subsequent VM-entry would fail, see @bugref{7445}.
6288 *
6289 * See Intel spec. 31.7.1.2 "Resuming Guest Software after Handling an Exception".
6290 */
6291 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6292 && enmRaise == IEMXCPTRAISE_PREV_EVENT
6293 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6294 && CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6295 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6296
6297 switch (enmRaise)
6298 {
6299 case IEMXCPTRAISE_CURRENT_XCPT:
6300 {
6301 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
6302 Assert(rcStrict == VINF_SUCCESS);
6303 break;
6304 }
6305
6306 case IEMXCPTRAISE_PREV_EVENT:
6307 {
6308 uint32_t u32ErrCode;
6309 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
6310 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
6311 else
6312 u32ErrCode = 0;
6313
6314 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
6315 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
6316 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
6317 pVCpu->cpum.GstCtx.cr2);
6318
6319 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6320 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
6321 Assert(rcStrict == VINF_SUCCESS);
6322 break;
6323 }
6324
6325 case IEMXCPTRAISE_REEXEC_INSTR:
6326 Assert(rcStrict == VINF_SUCCESS);
6327 break;
6328
6329 case IEMXCPTRAISE_DOUBLE_FAULT:
6330 {
6331 /*
6332 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
6333 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
6334 */
6335 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6336 {
6337 pVmxTransient->fVectoringDoublePF = true;
6338 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6339 pVCpu->cpum.GstCtx.cr2));
6340 rcStrict = VINF_SUCCESS;
6341 }
6342 else
6343 {
6344 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
6345 vmxHCSetPendingXcptDF(pVCpu);
6346 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6347 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6348 rcStrict = VINF_HM_DOUBLE_FAULT;
6349 }
6350 break;
6351 }
6352
6353 case IEMXCPTRAISE_TRIPLE_FAULT:
6354 {
6355 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
6356 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6357 rcStrict = VINF_EM_RESET;
6358 break;
6359 }
6360
6361 case IEMXCPTRAISE_CPU_HANG:
6362 {
6363 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
6364 rcStrict = VERR_EM_GUEST_CPU_HANG;
6365 break;
6366 }
6367
6368 default:
6369 {
6370 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
6371 rcStrict = VERR_VMX_IPE_2;
6372 break;
6373 }
6374 }
6375 }
6376 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6377 && !CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6378 {
6379 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
6380 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
6381 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
6382 {
6383 /*
6384 * Execution of IRET caused a fault when NMI blocking was in effect (i.e we're in
6385 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6386 * that virtual NMIs remain blocked until the IRET execution is completed.
6387 *
6388 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
6389 */
6390 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6391 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6392 }
6393 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6394 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6395 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6396 {
6397 /*
6398 * Execution of IRET caused an EPT violation, page-modification log-full event or
6399 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
6400 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6401 * that virtual NMIs remain blocked until the IRET execution is completed.
6402 *
6403 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
6404 */
6405 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
6406 {
6407 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6408 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6409 }
6410 }
6411 }
6412
6413 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
6414 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
6415 return rcStrict;
6416}
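#if 0
/* Illustrative sketch only (never compiled): the order in which a VM-exit handler is expected to
 * consult vmxHCCheckExitDueToEventDelivery() before doing its own work, per the @remarks above.
 * The handler name and the set of transient fields read are assumptions made for illustration. */
static VBOXSTRICTRC vmxHCExitWithVectoringCheckSketch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    /* Read the IDT-vectoring and exit-interruption info (HMVMX_READ_XCPT_INFO) first... */
    vmxHCReadToTransient<HMVMX_READ_XCPT_INFO>(pVCpu, pVmxTransient);

    /* ...then let the helper decide whether to re-inject the event, convert it to #DF, reset, etc. */
    VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
    if (rcStrict != VINF_SUCCESS) /* VINF_HM_DOUBLE_FAULT, VINF_EM_RESET or VERR_EM_GUEST_CPU_HANG. */
        return rcStrict;

    /* Only now handle the VM-exit itself. */
    return rcStrict;
}
#endif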
6417
6418
6419#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6420/**
6421 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
6422 * guest attempting to execute a VMX instruction.
6423 *
6424 * @returns Strict VBox status code (i.e. informational status codes too).
6425 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6426 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6427 *
6428 * @param pVCpu The cross context virtual CPU structure.
6429 * @param uExitReason The VM-exit reason.
6430 *
6431 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
6432 * @remarks No-long-jump zone!!!
6433 */
6434static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
6435{
6436 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6437 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6438
6439 /*
6440 * The physical CPU would have already checked the CPU mode/code segment.
6441 * We shall just assert here for paranoia.
6442 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
6443 */
6444 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6445 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6446 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
6447
6448 if (uExitReason == VMX_EXIT_VMXON)
6449 {
6450 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6451
6452 /*
6453 * We check CR4.VMXE because it is required to be always set while in VMX operation
6454 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
6455 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6456 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6457 */
6458 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6459 {
6460 Log4Func(("CR4.VMXE is not set -> #UD\n"));
6461 vmxHCSetPendingXcptUD(pVCpu);
6462 return VINF_HM_PENDING_XCPT;
6463 }
6464 }
6465 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
6466 {
6467 /*
6468 * The guest has not entered VMX operation but attempted to execute a VMX instruction
6469 * (other than VMXON), so we need to raise a #UD.
6470 */
6471 Log4Func(("Not in VMX root mode -> #UD\n"));
6472 vmxHCSetPendingXcptUD(pVCpu);
6473 return VINF_HM_PENDING_XCPT;
6474 }
6475
6476 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
6477 return VINF_SUCCESS;
6478}
6479
6480
6481/**
6482 * Decodes the memory operand of an instruction that caused a VM-exit.
6483 *
6484 * The Exit qualification field provides the displacement field for memory
6485 * operand instructions, if any.
6486 *
6487 * @returns Strict VBox status code (i.e. informational status codes too).
6488 * @retval VINF_SUCCESS if the operand was successfully decoded.
6489 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6490 * operand.
6491 * @param pVCpu The cross context virtual CPU structure.
6492 * @param uExitInstrInfo The VM-exit instruction information field.
6493 * @param enmMemAccess The memory operand's access type (read or write).
6494 * @param GCPtrDisp The instruction displacement field, if any. For
6495 * RIP-relative addressing pass RIP + displacement here.
6496 * @param pGCPtrMem Where to store the effective destination memory address.
6497 *
6498 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6499 * virtual-8086 mode hence skips those checks while verifying if the
6500 * segment is valid.
6501 */
6502static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6503 PRTGCPTR pGCPtrMem)
6504{
6505 Assert(pGCPtrMem);
6506 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6507 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6508 | CPUMCTX_EXTRN_CR0);
6509
6510 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6511 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6512 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6513
6514 VMXEXITINSTRINFO ExitInstrInfo;
6515 ExitInstrInfo.u = uExitInstrInfo;
6516 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6517 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6518 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6519 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6520 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6521 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6522 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6523 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6524 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6525
6526 /*
6527 * Validate instruction information.
6528 * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6529 */
6530 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6531 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6532 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6533 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6534 AssertLogRelMsgReturn(fIsMemOperand,
6535 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6536
6537 /*
6538 * Compute the complete effective address.
6539 *
6540 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6541 * See AMD spec. 4.5.2 "Segment Registers".
6542 */
6543 RTGCPTR GCPtrMem = GCPtrDisp;
6544 if (fBaseRegValid)
6545 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6546 if (fIdxRegValid)
6547 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6548
6549 RTGCPTR const GCPtrOff = GCPtrMem;
6550 if ( !fIsLongMode
6551 || iSegReg >= X86_SREG_FS)
6552 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6553 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6554
6555 /*
6556 * Validate effective address.
6557 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6558 */
6559 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6560 Assert(cbAccess > 0);
6561 if (fIsLongMode)
6562 {
6563 if (X86_IS_CANONICAL(GCPtrMem))
6564 {
6565 *pGCPtrMem = GCPtrMem;
6566 return VINF_SUCCESS;
6567 }
6568
6569 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6570 * "Data Limit Checks in 64-bit Mode". */
6571 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6572 vmxHCSetPendingXcptGP(pVCpu, 0);
6573 return VINF_HM_PENDING_XCPT;
6574 }
6575
6576 /*
6577 * This is a watered down version of iemMemApplySegment().
6578 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6579 * and segment CPL/DPL checks are skipped.
6580 */
6581 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6582 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6583 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6584
6585 /* Check if the segment is present and usable. */
6586 if ( pSel->Attr.n.u1Present
6587 && !pSel->Attr.n.u1Unusable)
6588 {
6589 Assert(pSel->Attr.n.u1DescType);
6590 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6591 {
6592 /* Check permissions for the data segment. */
6593 if ( enmMemAccess == VMXMEMACCESS_WRITE
6594 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6595 {
6596 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6597 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6598 return VINF_HM_PENDING_XCPT;
6599 }
6600
6601 /* Check limits if it's a normal data segment. */
6602 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6603 {
6604 if ( GCPtrFirst32 > pSel->u32Limit
6605 || GCPtrLast32 > pSel->u32Limit)
6606 {
6607 Log4Func(("Data segment limit exceeded. "
6608 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6609 GCPtrLast32, pSel->u32Limit));
6610 if (iSegReg == X86_SREG_SS)
6611 vmxHCSetPendingXcptSS(pVCpu, 0);
6612 else
6613 vmxHCSetPendingXcptGP(pVCpu, 0);
6614 return VINF_HM_PENDING_XCPT;
6615 }
6616 }
6617 else
6618 {
6619 /* Check limits if it's an expand-down data segment.
6620 Note! The upper boundary is defined by the B bit, not the G bit! */
6621 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6622 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6623 {
6624 Log4Func(("Expand-down data segment limit exceeded. "
6625 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6626 GCPtrLast32, pSel->u32Limit));
6627 if (iSegReg == X86_SREG_SS)
6628 vmxHCSetPendingXcptSS(pVCpu, 0);
6629 else
6630 vmxHCSetPendingXcptGP(pVCpu, 0);
6631 return VINF_HM_PENDING_XCPT;
6632 }
6633 }
6634 }
6635 else
6636 {
6637 /* Check permissions for the code segment. */
6638 if ( enmMemAccess == VMXMEMACCESS_WRITE
6639 || ( enmMemAccess == VMXMEMACCESS_READ
6640 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6641 {
6642 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6643 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6644 vmxHCSetPendingXcptGP(pVCpu, 0);
6645 return VINF_HM_PENDING_XCPT;
6646 }
6647
6648 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6649 if ( GCPtrFirst32 > pSel->u32Limit
6650 || GCPtrLast32 > pSel->u32Limit)
6651 {
6652 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6653 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6654 if (iSegReg == X86_SREG_SS)
6655 vmxHCSetPendingXcptSS(pVCpu, 0);
6656 else
6657 vmxHCSetPendingXcptGP(pVCpu, 0);
6658 return VINF_HM_PENDING_XCPT;
6659 }
6660 }
6661 }
6662 else
6663 {
6664 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6665 vmxHCSetPendingXcptGP(pVCpu, 0);
6666 return VINF_HM_PENDING_XCPT;
6667 }
6668
6669 *pGCPtrMem = GCPtrMem;
6670 return VINF_SUCCESS;
6671}
6672#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6673
6674
6675/**
6676 * VM-exit helper for LMSW.
6677 */
6678static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6679{
6680 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6681 AssertRCReturn(rc, rc);
6682
6683 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6684 AssertMsg( rcStrict == VINF_SUCCESS
6685 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6686
6687 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6688 if (rcStrict == VINF_IEM_RAISED_XCPT)
6689 {
6690 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6691 rcStrict = VINF_SUCCESS;
6692 }
6693
6694 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6695 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6696 return rcStrict;
6697}
6698
6699
6700/**
6701 * VM-exit helper for CLTS.
6702 */
6703static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6704{
6705 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6706 AssertRCReturn(rc, rc);
6707
6708 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6709 AssertMsg( rcStrict == VINF_SUCCESS
6710 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6711
6712 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6713 if (rcStrict == VINF_IEM_RAISED_XCPT)
6714 {
6715 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6716 rcStrict = VINF_SUCCESS;
6717 }
6718
6719 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6720 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6721 return rcStrict;
6722}
6723
6724
6725/**
6726 * VM-exit helper for MOV from CRx (CRx read).
6727 */
6728static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6729{
6730 Assert(iCrReg < 16);
6731 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6732
6733 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6734 AssertRCReturn(rc, rc);
6735
6736 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6737 AssertMsg( rcStrict == VINF_SUCCESS
6738 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6739
6740 if (iGReg == X86_GREG_xSP)
6741 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6742 else
6743 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6744#ifdef VBOX_WITH_STATISTICS
6745 switch (iCrReg)
6746 {
6747 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6748 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6749 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6750 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6751 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6752 }
6753#endif
6754 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6755 return rcStrict;
6756}
6757
6758
6759/**
6760 * VM-exit helper for MOV to CRx (CRx write).
6761 */
6762static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6763{
6764 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6765
6766 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6767 AssertMsg( rcStrict == VINF_SUCCESS
6768 || rcStrict == VINF_IEM_RAISED_XCPT
6769 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6770
6771 switch (iCrReg)
6772 {
6773 case 0:
6774 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6775 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6776 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6777 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6778 break;
6779
6780 case 2:
6781 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6782 /* Nothing to do here, CR2 is not part of the VMCS. */
6783 break;
6784
6785 case 3:
6786 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6787 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6788 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6789 break;
6790
6791 case 4:
6792 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6793 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6794#ifndef IN_NEM_DARWIN
6795 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6796 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6797#else
6798 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6799#endif
6800 break;
6801
6802 case 8:
6803 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6804 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6805 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6806 break;
6807
6808 default:
6809 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6810 break;
6811 }
6812
6813 if (rcStrict == VINF_IEM_RAISED_XCPT)
6814 {
6815 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6816 rcStrict = VINF_SUCCESS;
6817 }
6818 return rcStrict;
6819}
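#if 0
/* Illustrative sketch only (never compiled): how the four CRx helpers above are expected to be
 * selected from the MOV CRx Exit qualification. The VMX_EXIT_QUAL_CRX_* accessors and the LMSW
 * operand handling are sketched from memory and should be treated as assumptions rather than the
 * actual vmxHCExitMovCRx() implementation found later in this file. */
uint64_t const uExitQual = pVmxTransient->uExitQual;
switch (VMX_EXIT_QUAL_CRX_ACCESS(uExitQual))
{
    case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:    /* MOV to CRx. */
        rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr,
                                     VMX_EXIT_QUAL_CRX_GENREG(uExitQual), VMX_EXIT_QUAL_CRX_REGISTER(uExitQual));
        break;
    case VMX_EXIT_QUAL_CRX_ACCESS_READ:     /* MOV from CRx. */
        rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr,
                                       VMX_EXIT_QUAL_CRX_GENREG(uExitQual), VMX_EXIT_QUAL_CRX_REGISTER(uExitQual));
        break;
    case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:     /* CLTS. */
        rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
        break;
    case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:     /* LMSW; GCPtrEffDst is the decoded memory operand, if any. */
        rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr,
                                 VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual), GCPtrEffDst);
        break;
}
#endif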
6820
6821
6822/**
6823 * VM-exit exception handler for \#PF (Page-fault exception).
6824 *
6825 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6826 */
6827static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6828{
6829 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6830 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
6831
6832#ifndef IN_NEM_DARWIN
6833 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6834 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6835 { /* likely */ }
6836 else
6837#endif
6838 {
6839#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6840 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6841#endif
6842 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6843 if (!pVmxTransient->fVectoringDoublePF)
6844 {
6845 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6846 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6847 }
6848 else
6849 {
6850 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6851 Assert(!pVmxTransient->fIsNestedGuest);
6852 vmxHCSetPendingXcptDF(pVCpu);
6853 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6854 }
6855 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6856 return VINF_SUCCESS;
6857 }
6858
6859 Assert(!pVmxTransient->fIsNestedGuest);
6860
6861 /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
6862 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6863 if (pVmxTransient->fVectoringPF)
6864 {
6865 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6866 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6867 }
6868
6869 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6870 AssertRCReturn(rc, rc);
6871
6872 Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pVCpu->cpum.GstCtx.cs.Sel,
6873 pVCpu->cpum.GstCtx.rip, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pVCpu->cpum.GstCtx.cr3));
6874
6875 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6876 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, &pVCpu->cpum.GstCtx, (RTGCPTR)pVmxTransient->uExitQual);
6877
6878 Log4Func(("#PF: rc=%Rrc\n", rc));
6879 if (rc == VINF_SUCCESS)
6880 {
6881 /*
6882 * This is typically a shadow page table sync or a MMIO instruction. But we may have
6883 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6884 */
6885 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6886 TRPMResetTrap(pVCpu);
6887 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6888 return rc;
6889 }
6890
6891 if (rc == VINF_EM_RAW_GUEST_TRAP)
6892 {
6893 if (!pVmxTransient->fVectoringDoublePF)
6894 {
6895 /* It's a guest page fault and needs to be reflected to the guest. */
6896 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6897 TRPMResetTrap(pVCpu);
6898 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6899 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6900 uGstErrorCode, pVmxTransient->uExitQual);
6901 }
6902 else
6903 {
6904 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6905 TRPMResetTrap(pVCpu);
6906 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6907 vmxHCSetPendingXcptDF(pVCpu);
6908 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6909 }
6910
6911 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6912 return VINF_SUCCESS;
6913 }
6914
6915 TRPMResetTrap(pVCpu);
6916 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6917 return rc;
6918}
6919
6920
6921/**
6922 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6923 *
6924 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6925 */
6926static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6927{
6928 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6929 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6930
6931 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR0>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6932 AssertRCReturn(rc, rc);
6933
6934 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6935 {
6936 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
6937 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6938
6939 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6940 * provides VM-exit instruction length. If this causes problems later,
6941 * disassemble the instruction like it's done on AMD-V. */
6942 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6943 AssertRCReturn(rc2, rc2);
6944 return rc;
6945 }
6946
6947 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6948 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6949 return VINF_SUCCESS;
6950}
6951
6952
6953/**
6954 * VM-exit exception handler for \#BP (Breakpoint exception).
6955 *
6956 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6957 */
6958static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6959{
6960 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6961 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6962
6963 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6964 AssertRCReturn(rc, rc);
6965
6966 VBOXSTRICTRC rcStrict;
6967 if (!pVmxTransient->fIsNestedGuest)
6968 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
6969 else
6970 rcStrict = VINF_EM_RAW_GUEST_TRAP;
6971
6972 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6973 {
6974 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6975 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6976 rcStrict = VINF_SUCCESS;
6977 }
6978
6979 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
6980 return rcStrict;
6981}
6982
6983
6984/**
6985 * VM-exit exception handler for \#AC (Alignment-check exception).
6986 *
6987 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6988 */
6989static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6990{
6991 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6992
6993 /*
6994 * Detect #ACs caused by host having enabled split-lock detection.
6995 * Emulate such instructions.
6996 */
6997#define VMX_HC_EXIT_XCPT_AC_INITIAL_REGS (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS)
6998 int rc = vmxHCImportGuestState<VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6999 AssertRCReturn(rc, rc);
7000 /** @todo detect split lock in cpu feature? */
7001 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
7002 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7003 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
7004 || CPUMGetGuestCPL(pVCpu) != 3
7005 /* 3. When EFLAGS.AC is zero this can only be a split-lock case. */
7006 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
7007 {
7008 /*
7009 * Check for debug/trace events and import state accordingly.
7010 */
7011 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
7012 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7013 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
7014#ifndef IN_NEM_DARWIN
7015 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
7016#endif
7017 )
7018 {
7019 if (pVM->cCpus == 1)
7020 {
7021#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7022 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK,
7023 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7024#else
7025 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7026 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7027#endif
7028 AssertRCReturn(rc, rc);
7029 }
7030 }
7031 else
7032 {
7033 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7034 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7035 AssertRCReturn(rc, rc);
7036
7037 VBOXVMM_VMX_SPLIT_LOCK(pVCpu, &pVCpu->cpum.GstCtx);
7038
7039 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
7040 {
7041 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
7042 if (rcStrict != VINF_SUCCESS)
7043 return rcStrict;
7044 }
7045 }
7046
7047 /*
7048 * Emulate the instruction.
7049 *
7050 * We have to ignore the LOCK prefix here as we must not retrigger the
7051 * detection on the host. This isn't all that satisfactory, though...
7052 */
7053 if (pVM->cCpus == 1)
7054 {
7055 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
7056 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7057
7058 /** @todo For SMP configs we should do a rendezvous here. */
7059 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
7060 if (rcStrict == VINF_SUCCESS)
7061#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7062 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
7063 HM_CHANGED_GUEST_RIP
7064 | HM_CHANGED_GUEST_RFLAGS
7065 | HM_CHANGED_GUEST_GPRS_MASK
7066 | HM_CHANGED_GUEST_CS
7067 | HM_CHANGED_GUEST_SS);
7068#else
7069 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7070#endif
7071 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7072 {
7073 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7074 rcStrict = VINF_SUCCESS;
7075 }
7076 return rcStrict;
7077 }
7078 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
7079 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7080 return VINF_EM_EMULATE_SPLIT_LOCK;
7081 }
7082
7083 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
7084 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7085 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
7086
7087 /* Re-inject it. We'll detect any nesting before getting here. */
7088 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7089 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7090 return VINF_SUCCESS;
7091}
7092
7093
7094/**
7095 * VM-exit exception handler for \#DB (Debug exception).
7096 *
7097 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7098 */
7099static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7100{
7101 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7102 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
7103
7104 /*
7105 * Get the DR6-like values from the Exit qualification and pass it to DBGF for processing.
7106 */
7107 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
7108
7109 /* See Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
7110 uint64_t const uDR6 = X86_DR6_INIT_VAL
7111 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
7112 | X86_DR6_BD | X86_DR6_BS));
7113
7114 int rc;
7115 if (!pVmxTransient->fIsNestedGuest)
7116 {
7117 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx, uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7118
7119 /*
7120 * Prevents stepping twice over the same instruction when the guest is stepping using
7121 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
7122 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
7123 */
7124 if ( rc == VINF_EM_DBG_STEPPED
7125 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
7126 {
7127 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7128 rc = VINF_EM_RAW_GUEST_TRAP;
7129 }
7130 }
7131 else
7132 rc = VINF_EM_RAW_GUEST_TRAP;
7133 Log6Func(("rc=%Rrc\n", rc));
7134 if (rc == VINF_EM_RAW_GUEST_TRAP)
7135 {
7136 /*
7137 * The exception was for the guest. Update DR6, DR7.GD and
7138 * IA32_DEBUGCTL.LBR before forwarding it.
7139 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
7140 */
7141#ifndef IN_NEM_DARWIN
7142 VMMRZCallRing3Disable(pVCpu);
7143 HM_DISABLE_PREEMPT(pVCpu);
7144
7145 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
7146 pVCpu->cpum.GstCtx.dr[6] |= uDR6;
7147 if (CPUMIsGuestDebugStateActive(pVCpu))
7148 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
7149
7150 HM_RESTORE_PREEMPT();
7151 VMMRZCallRing3Enable(pVCpu);
7152#else
7153 /** @todo */
7154#endif
7155
7156 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7157 AssertRCReturn(rc, rc);
7158
7159 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
7160 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_GD;
7161
7162 /* Paranoia. */
7163 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
7164 pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_RA1_MASK;
7165
7166 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pVCpu->cpum.GstCtx.dr[7]);
7167 AssertRC(rc);
7168
7169 /*
7170 * Raise #DB in the guest.
7171 *
7172 * It is important to reflect exactly what the VM-exit gave us (preserving the
7173 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
7174 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
7175 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
7176 *
7177 * Intel re-documented ICEBP/INT1 in May 2018; previously it was documented as part of
7178 * the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7179 */
7180 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7181 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7182 return VINF_SUCCESS;
7183 }
7184
7185 /*
7186 * Not a guest trap, must be a hypervisor related debug event then.
7187 * Update DR6 in case someone is interested in it.
7188 */
7189 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
7190 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
7191 CPUMSetHyperDR6(pVCpu, uDR6);
7192
7193 return rc;
7194}
7195
7196
7197/**
7198 * Hacks its way around the lovely mesa driver's backdoor accesses.
7199 *
7200 * @sa hmR0SvmHandleMesaDrvGp.
7201 */
7202static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7203{
7204 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
7205 RT_NOREF(pCtx);
7206
7207 /* For now we'll just skip the instruction. */
7208 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7209}
7210
7211
7212/**
7213 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
7214 * backdoor logging w/o checking what it is running inside.
7215 *
7216 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
7217 * backdoor port and magic numbers loaded in registers.
7218 *
7219 * @returns true if it is, false if it isn't.
7220 * @sa hmR0SvmIsMesaDrvGp.
7221 */
7222DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7223{
7224 /* 0xed: IN eAX,dx */
7225 uint8_t abInstr[1];
7226 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
7227 return false;
7228
7229 /* Check that it is #GP(0). */
7230 if (pVmxTransient->uExitIntErrorCode != 0)
7231 return false;
7232
7233 /* Check magic and port. */
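 /* The mesa driver uses the VMware backdoor interface: magic 0x564d5868 ('VMXh')
    in EAX and I/O port 0x5658 ('VX') in DX. */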
7234 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
7235 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
7236 if (pCtx->rax != UINT32_C(0x564d5868))
7237 return false;
7238 if (pCtx->dx != UINT32_C(0x5658))
7239 return false;
7240
7241 /* Flat ring-3 CS. */
7242 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
7243 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
7244 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
7245 if (pCtx->cs.Attr.n.u2Dpl != 3)
7246 return false;
7247 if (pCtx->cs.u64Base != 0)
7248 return false;
7249
7250 /* Check opcode. */
7251 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
7252 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
7253 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
7254 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
7255 if (RT_FAILURE(rc))
7256 return false;
7257 if (abInstr[0] != 0xed)
7258 return false;
7259
7260 return true;
7261}
7262
7263
7264/**
7265 * VM-exit exception handler for \#GP (General-protection exception).
7266 *
7267 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7268 */
7269static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7270{
7271 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7272 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
7273
7274 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7275 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7276#ifndef IN_NEM_DARWIN
7277 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
7278 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
7279 { /* likely */ }
7280 else
7281#endif
7282 {
7283#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7284# ifndef IN_NEM_DARWIN
7285 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7286# else
7287 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7288# endif
7289#endif
7290 /*
7291 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
7292 * executing a nested-guest, reflect #GP to the guest or nested-guest.
7293 */
7294 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7295 AssertRCReturn(rc, rc);
7296 Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
7297 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
7298
7299 if ( pVmxTransient->fIsNestedGuest
7300 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
7301 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
7302 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7303 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7304 else
7305 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
7306 return rc;
7307 }
7308
7309#ifndef IN_NEM_DARWIN
7310 Assert(CPUMIsGuestInRealModeEx(pCtx));
7311 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
7312 Assert(!pVmxTransient->fIsNestedGuest);
7313
7314 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7315 AssertRCReturn(rc, rc);
7316
7317 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7318 if (rcStrict == VINF_SUCCESS)
7319 {
7320 if (!CPUMIsGuestInRealModeEx(pCtx))
7321 {
7322 /*
7323 * The guest is no longer in real-mode, check if we can continue executing the
7324 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
7325 */
7326 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
7327 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
7328 {
7329 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
7330 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7331 }
7332 else
7333 {
7334 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
7335 rcStrict = VINF_EM_RESCHEDULE;
7336 }
7337 }
7338 else
7339 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7340 }
7341 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7342 {
7343 rcStrict = VINF_SUCCESS;
7344 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7345 }
7346 return VBOXSTRICTRC_VAL(rcStrict);
7347#endif
7348}
7349
7350
7351/**
7352 * VM-exit exception handler for \#DE (Divide Error).
7353 *
7354 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7355 */
7356static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7357{
7358 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7359 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
7360
7361 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7362 AssertRCReturn(rc, rc);
7363
7364 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7365 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
7366 {
7367 uint8_t cbInstr = 0;
7368 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
7369 if (rc2 == VINF_SUCCESS)
7370 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
7371 else if (rc2 == VERR_NOT_FOUND)
7372 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
7373 else
7374 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
7375 }
7376 else
7377 rcStrict = VINF_SUCCESS; /* Do nothing. */
7378
7379 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
7380 if (RT_FAILURE(rcStrict))
7381 {
7382 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7383 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7384 rcStrict = VINF_SUCCESS;
7385 }
7386
7387 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
7388 return VBOXSTRICTRC_VAL(rcStrict);
7389}
7390
7391
7392/**
7393 * VM-exit exception handler wrapper for all other exceptions that are not handled
7394 * by a specific handler.
7395 *
7396 * This simply re-injects the exception back into the VM without any special
7397 * processing.
7398 *
7399 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7400 */
7401static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7402{
7403 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7404
7405#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7406# ifndef IN_NEM_DARWIN
7407 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7408 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
7409 ("uVector=%#x u32XcptBitmap=%#X32\n",
7410 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
7411 NOREF(pVmcsInfo);
7412# endif
7413#endif
7414
7415 /*
7416 * Re-inject the exception into the guest. This cannot be a double-fault condition which
7417 * would have been handled while checking exits due to event delivery.
7418 */
7419 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7420
7421#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7422 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7423 AssertRCReturn(rc, rc);
7424 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7425#endif
7426
7427#ifdef VBOX_WITH_STATISTICS
7428 switch (uVector)
7429 {
7430 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
7431 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
7432 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
7433 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7434 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
7435 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
7436 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7437 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
7438 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
7439 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
7440 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
7441 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
7442 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
7443 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
7444 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
7445 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
7446 default:
7447 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
7448 break;
7449 }
7450#endif
7451
7452 /* We should never call this function for a page-fault; we'd need to pass on the fault address below otherwise. */
7453 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
7454 NOREF(uVector);
7455
7456 /* Re-inject the original exception into the guest. */
7457 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7458 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7459 return VINF_SUCCESS;
7460}
7461
7462
7463/**
7464 * VM-exit exception handler for all exceptions (except NMIs!).
7465 *
7466 * @remarks This may be called for both guests and nested-guests. Take care to not
7467 * make assumptions and avoid doing anything that is not relevant when
7468 * executing a nested-guest (e.g., Mesa driver hacks).
7469 */
7470static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7471{
7472 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
7473
7474 /*
7475 * If this VM-exit occurred while delivering an event through the guest IDT, take
7476 * action based on the return code and additional hints (e.g. for page-faults)
7477 * that will be updated in the VMX transient structure.
7478 */
7479 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
7480 if (rcStrict == VINF_SUCCESS)
7481 {
7482 /*
7483 * If an exception caused a VM-exit due to delivery of an event, the original
7484 * event may have to be re-injected into the guest. We shall reinject it and
7485 * continue guest execution. However, page-fault is a complicated case and
7486 * needs additional processing done in vmxHCExitXcptPF().
7487 */
7488 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7489 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7490 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
7491 || uVector == X86_XCPT_PF)
7492 {
7493 switch (uVector)
7494 {
7495 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7496 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7497 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7498 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7499 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7500 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7501 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7502 default:
7503 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7504 }
7505 }
7506 /* else: inject pending event before resuming guest execution. */
7507 }
7508 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7509 {
7510 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7511 rcStrict = VINF_SUCCESS;
7512 }
7513
7514 return rcStrict;
7515}
7516/** @} */
7517
7518
7519/** @name VM-exit handlers.
7520 * @{
7521 */
7522/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7523/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7524/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7525
7526/**
7527 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7528 */
7529HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7530{
7531 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7532 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7533
7534#ifndef IN_NEM_DARWIN
7535 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7536 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7537 return VINF_SUCCESS;
7538 return VINF_EM_RAW_INTERRUPT;
7539#else
7540 return VINF_SUCCESS;
7541#endif
7542}
7543
7544
7545/**
7546 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7547 * VM-exit.
7548 */
7549HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7550{
7551 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7552 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7553
7554 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
7555
7556 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7557 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7558 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7559
7560 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7561 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7562 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7563 NOREF(pVmcsInfo);
7564
7565 VBOXSTRICTRC rcStrict;
7566 switch (uExitIntType)
7567 {
7568#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7569 /*
7570 * Host physical NMIs:
7571 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7572 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7573 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7574 *
7575 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7576 * See Intel spec. 27.5.5 "Updating Non-Register State".
7577 */
7578 case VMX_EXIT_INT_INFO_TYPE_NMI:
7579 {
7580 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7581 break;
7582 }
7583#endif
7584
7585 /*
7586 * Privileged software exceptions (#DB from ICEBP),
7587 * Software exceptions (#BP and #OF),
7588 * Hardware exceptions:
7589 * Process the required exceptions and resume guest execution if possible.
7590 */
7591 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7592 Assert(uVector == X86_XCPT_DB);
7593 RT_FALL_THRU();
7594 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7595 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7596 RT_FALL_THRU();
7597 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7598 {
7599 NOREF(uVector);
7600 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
7601 | HMVMX_READ_EXIT_INSTR_LEN
7602 | HMVMX_READ_IDT_VECTORING_INFO
7603 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
7604 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7605 break;
7606 }
7607
7608 default:
7609 {
7610 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7611 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7612 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7613 break;
7614 }
7615 }
7616
7617 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7618 return rcStrict;
7619}
7620
7621
7622/**
7623 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7624 */
7625HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7626{
7627 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7628
7629 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7630 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7631 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7632
7633 /* Evaluate and deliver pending events and resume guest execution. */
7634 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7635 return VINF_SUCCESS;
7636}
7637
7638
7639/**
7640 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7641 */
7642HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7643{
7644 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7645
7646 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7647 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7648 {
7649 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7650 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7651 }
7652
7653 Assert(!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx));
7654
7655 /*
7656 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7657 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7658 */
7659 uint32_t fIntrState;
7660 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7661 AssertRC(rc);
7662 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7663 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7664 {
7665 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
7666
7667 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7668 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7669 AssertRC(rc);
7670 }
7671
7672 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7673 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7674
7675 /* Evaluate and deliver pending events and resume guest execution. */
7676 return VINF_SUCCESS;
7677}
7678
7679
7680/**
7681 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7682 */
7683HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7684{
7685 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7686 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7687}
7688
7689
7690/**
7691 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7692 */
7693HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7694{
7695 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7696 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7697}
7698
7699
7700/**
7701 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7702 */
7703HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7704{
7705 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7706
7707 /*
7708 * Get the state we need and update the exit history entry.
7709 */
7710 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7711 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7712 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7713 AssertRCReturn(rc, rc);
7714
7715 VBOXSTRICTRC rcStrict;
7716 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7717 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7718 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7719 if (!pExitRec)
7720 {
7721 /*
7722 * Regular CPUID instruction execution.
7723 */
7724 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7725 if (rcStrict == VINF_SUCCESS)
7726 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7727 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7728 {
7729 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7730 rcStrict = VINF_SUCCESS;
7731 }
7732 }
7733 else
7734 {
7735 /*
7736 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7737 */
7738 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7739 IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7740 AssertRCReturn(rc2, rc2);
7741
7742 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7743 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7744
7745 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7746 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7747
7748 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7749 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7750 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7751 }
7752 return rcStrict;
7753}
7754
7755
7756/**
7757 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7758 */
7759HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7760{
7761 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7762
7763 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7764 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7765 AssertRCReturn(rc, rc);
7766
7767 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7768 return VINF_EM_RAW_EMULATE_INSTR;
7769
7770 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7771 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7772}
7773
7774
7775/**
7776 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7777 */
7778HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7779{
7780 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7781
7782 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7783 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7784 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7785 AssertRCReturn(rc, rc);
7786
7787 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7788 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7789 {
7790 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7791 we must reset offsetting on VM-entry. See @bugref{6634}. */
7792 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7793 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7794 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7795 }
7796 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7797 {
7798 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7799 rcStrict = VINF_SUCCESS;
7800 }
7801 return rcStrict;
7802}
7803
7804
7805/**
7806 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7807 */
7808HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7809{
7810 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7811
7812 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7813 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7814 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX>(pVCpu, pVmcsInfo, __FUNCTION__);
7815 AssertRCReturn(rc, rc);
7816
7817 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7818 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7819 {
7820 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7821 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7822 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7823 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7824 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7825 }
7826 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7827 {
7828 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7829 rcStrict = VINF_SUCCESS;
7830 }
7831 return rcStrict;
7832}
7833
7834
7835/**
7836 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7837 */
7838HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7839{
7840 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7841
7842 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7843 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7844 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7845 AssertRCReturn(rc, rc);
7846
7847 VBOXSTRICTRC rcStrict = IEMExecDecodedRdpmc(pVCpu, pVmxTransient->cbExitInstr);
7848 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7849 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7850 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7851 {
7852 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7853 rcStrict = VINF_SUCCESS;
7854 }
7855 return rcStrict;
7856}
7857
7858
7859/**
7860 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7861 */
7862HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7863{
7864 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7865
7866 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7867 if (EMAreHypercallInstructionsEnabled(pVCpu))
7868 {
7869 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7870 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RIP
7871 | CPUMCTX_EXTRN_RFLAGS
7872 | CPUMCTX_EXTRN_CR0
7873 | CPUMCTX_EXTRN_SS
7874 | CPUMCTX_EXTRN_CS
7875 | CPUMCTX_EXTRN_EFER>(pVCpu, pVmcsInfo, __FUNCTION__);
7876 AssertRCReturn(rc, rc);
7877
7878 /* Perform the hypercall. */
7879 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7880 if (rcStrict == VINF_SUCCESS)
7881 {
7882 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7883 AssertRCReturn(rc, rc);
7884 }
7885 else
7886 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7887 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7888 || RT_FAILURE(rcStrict));
7889
7890 /* If the hypercall changes anything other than guest's general-purpose registers,
7891 we would need to reload the guest changed bits here before VM-entry. */
7892 }
7893 else
7894 Log4Func(("Hypercalls not enabled\n"));
7895
7896 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7897 if (RT_FAILURE(rcStrict))
7898 {
7899 vmxHCSetPendingXcptUD(pVCpu);
7900 rcStrict = VINF_SUCCESS;
7901 }
7902
7903 return rcStrict;
7904}
7905
7906
7907/**
7908 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7909 */
7910HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7911{
7912 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7913#ifndef IN_NEM_DARWIN
7914 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7915#endif
7916
7917 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7918 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
7919 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7920 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7921 AssertRCReturn(rc, rc);
7922
7923 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7924
7925 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7926 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7927 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7928 {
7929 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7930 rcStrict = VINF_SUCCESS;
7931 }
7932 else
7933 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7934 VBOXSTRICTRC_VAL(rcStrict)));
7935 return rcStrict;
7936}
7937
7938
7939/**
7940 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7941 */
7942HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7943{
7944 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7945
7946 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7947 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7948 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS>(pVCpu, pVmcsInfo, __FUNCTION__);
7949 AssertRCReturn(rc, rc);
7950
7951 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7952 if (rcStrict == VINF_SUCCESS)
7953 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7954 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7955 {
7956 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7957 rcStrict = VINF_SUCCESS;
7958 }
7959
7960 return rcStrict;
7961}
7962
7963
7964/**
7965 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7966 */
7967HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7968{
7969 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7970
7971 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7972 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7973 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7974 AssertRCReturn(rc, rc);
7975
7976 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
7977 if (RT_SUCCESS(rcStrict))
7978 {
7979 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7980 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7981 rcStrict = VINF_SUCCESS;
7982 }
7983
7984 return rcStrict;
7985}
7986
7987
7988/**
7989 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7990 * VM-exit.
7991 */
7992HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7993{
7994 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7995 return VINF_EM_RESET;
7996}
7997
7998
7999/**
8000 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
8001 */
8002HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8003{
8004 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8005
8006 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8007 AssertRCReturn(rc, rc);
8008
8009 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
8010 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
8011 rc = VINF_SUCCESS;
8012 else
8013 rc = VINF_EM_HALT;
8014
8015 if (rc != VINF_SUCCESS)
8016 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
8017 return rc;
8018}
8019
8020
8021#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8022/**
8023 * VM-exit handler for instructions that result in a \#UD exception delivered to
8024 * the guest.
8025 */
8026HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8027{
8028 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8029 vmxHCSetPendingXcptUD(pVCpu);
8030 return VINF_SUCCESS;
8031}
8032#endif
8033
8034
8035/**
8036 * VM-exit handler for expiry of the VMX-preemption timer.
8037 */
8038HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8039{
8040 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8041
8042 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
8043 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8044 Log12(("vmxHCExitPreemptTimer:\n"));
8045
8046 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
8047 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8048 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
8049 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
8050 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
8051}
8052
8053
8054/**
8055 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
8056 */
8057HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8058{
8059 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8060
8061 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8062 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8063 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
8064 AssertRCReturn(rc, rc);
8065
8066 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
8067 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
8068 : HM_CHANGED_RAISED_XCPT_MASK);
8069
8070#ifndef IN_NEM_DARWIN
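 /* After XSETBV the guest XCR0 may differ from the host value; track whether XCR0
    must be swapped around VM-entry/VM-exit and, if that requirement changed, refresh
    the VM-entry worker selection. */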
8071 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8072 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
8073 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
8074 {
8075 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
8076 hmR0VmxUpdateStartVmFunction(pVCpu);
8077 }
8078#endif
8079
8080 return rcStrict;
8081}
8082
8083
8084/**
8085 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
8086 */
8087HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8088{
8089 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8090
8091 /** @todo Enable the new code after finding a reliable guest test-case. */
8092#if 1
8093 return VERR_EM_INTERPRETER;
8094#else
8095 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8096 | HMVMX_READ_EXIT_INSTR_INFO
8097 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8098 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8099 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8100 AssertRCReturn(rc, rc);
8101
8102 /* Paranoia. Ensure this has a memory operand. */
8103 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
8104
8105 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
8106 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
8107 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
8108 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
8109
8110 RTGCPTR GCPtrDesc;
8111 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
8112
8113 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
8114 GCPtrDesc, uType);
8115 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8116 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8117 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8118 {
8119 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8120 rcStrict = VINF_SUCCESS;
8121 }
8122 return rcStrict;
8123#endif
8124}
8125
8126
8127/**
8128 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
8129 * VM-exit.
8130 */
8131HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8132{
8133 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8134 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8135 AssertRCReturn(rc, rc);
8136
8137 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
8138 if (RT_FAILURE(rc))
8139 return rc;
8140
8141 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
8142 NOREF(uInvalidReason);
8143
8144#ifdef VBOX_STRICT
8145 uint32_t fIntrState;
8146 uint64_t u64Val;
8147 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_INFO
8148 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8149 vmxHCReadEntryIntInfoVmcs(pVCpu, pVmxTransient);
     vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
     vmxHCReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
8150
8151 Log4(("uInvalidReason %u\n", uInvalidReason));
8152 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
8153 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
8154 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
8155
8156 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
8157 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
8158 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
8159 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
8160 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
8161 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
8162 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
8163 Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
8164 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
8165 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
8166 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
8167 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8168# ifndef IN_NEM_DARWIN
8169 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
8170 {
8171 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
8172 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
8173 }
8174
8175 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
8176# endif
8177#endif
8178
8179 return VERR_VMX_INVALID_GUEST_STATE;
8180}
8181
8182/**
8183 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
8184 */
8185HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8186{
8187 /*
8188 * Cumulative notes of all recognized but unexpected VM-exits.
8189 *
8190 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
8191 * nested-paging is used.
8192 *
8193 * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
8194 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
8195 * this function (and thereby stop VM execution) for handling such instructions.
8196 *
8197 *
8198 * VMX_EXIT_INIT_SIGNAL:
8199 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
8200 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these
8201 * VM-exits. However, we should not get INIT-signal VM-exits while executing a VM.
8202 *
8203 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery"
8204 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
8205 * See Intel spec. "23.8 Restrictions on VMX operation".
8206 *
8207 * VMX_EXIT_SIPI:
8208 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
8209 * activity state is used. We don't make use of it as our guests don't have direct
8210 * access to the host local APIC.
8211 *
8212 * See Intel spec. 25.3 "Other Causes of VM-exits".
8213 *
8214 * VMX_EXIT_IO_SMI:
8215 * VMX_EXIT_SMI:
8216 * This can only happen if we support dual-monitor treatment of SMI, which can be
8217 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
8218 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
8219 * VMX root mode or receive an SMI. If we get here, something funny is going on.
8220 *
8221 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
8222 * See Intel spec. 25.3 "Other Causes of VM-Exits"
8223 *
8224 * VMX_EXIT_ERR_MSR_LOAD:
8225 * Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
8226 * and typically indicate a bug in the hypervisor code. We thus cannot resume
8227 * execution.
8228 *
8229 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
8230 *
8231 * VMX_EXIT_ERR_MACHINE_CHECK:
8232 * Machine-check exceptions indicate a fatal/unrecoverable hardware condition,
8233 * including but not limited to system bus, ECC, parity, cache and TLB errors. An
8234 * abort-class #MC exception is raised. We thus cannot assume a
8235 * reasonable chance of continuing any sort of execution and we bail.
8236 *
8237 * See Intel spec. 15.1 "Machine-check Architecture".
8238 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
8239 *
8240 * VMX_EXIT_PML_FULL:
8241 * VMX_EXIT_VIRTUALIZED_EOI:
8242 * VMX_EXIT_APIC_WRITE:
8243 * We do not currently support any of these features and thus they are all unexpected
8244 * VM-exits.
8245 *
8246 * VMX_EXIT_GDTR_IDTR_ACCESS:
8247 * VMX_EXIT_LDTR_TR_ACCESS:
8248 * VMX_EXIT_RDRAND:
8249 * VMX_EXIT_RSM:
8250 * VMX_EXIT_VMFUNC:
8251 * VMX_EXIT_ENCLS:
8252 * VMX_EXIT_RDSEED:
8253 * VMX_EXIT_XSAVES:
8254 * VMX_EXIT_XRSTORS:
8255 * VMX_EXIT_UMWAIT:
8256 * VMX_EXIT_TPAUSE:
8257 * VMX_EXIT_LOADIWKEY:
8258 * These VM-exits are -not- caused unconditionally by execution of the corresponding
8259 * instruction. Any VM-exit for these instructions indicates a hardware problem,
8260 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
8261 *
8262 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
8263 */
8264 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8265 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
8266 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8267}
8268
8269
8270/**
8271 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
8272 */
8273HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8274{
8275 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8276
8277 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8278
8279 /** @todo Optimize this: We currently drag in the whole MSR state
8280 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8281 * the MSRs required. That would require changes to IEM and possibly CPUM too.
8282 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8283 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8284 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8285 int rc;
8286 switch (idMsr)
8287 {
8288 default:
8289 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8290 __FUNCTION__);
8291 AssertRCReturn(rc, rc);
8292 break;
8293 case MSR_K8_FS_BASE:
8294 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8295 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8296 AssertRCReturn(rc, rc);
8297 break;
8298 case MSR_K8_GS_BASE:
8299 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8300 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8301 AssertRCReturn(rc, rc);
8302 break;
8303 }
8304
8305 Log4Func(("ecx=%#RX32\n", idMsr));
8306
8307#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8308 Assert(!pVmxTransient->fIsNestedGuest);
8309 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
8310 {
8311 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
8312 && idMsr != MSR_K6_EFER)
8313 {
8314 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
8315 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8316 }
8317 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8318 {
8319 Assert(pVmcsInfo->pvMsrBitmap);
8320 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8321 if (fMsrpm & VMXMSRPM_ALLOW_RD)
8322 {
8323 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
8324 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8325 }
8326 }
8327 }
8328#endif
8329
8330 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
8331 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
8332 if (rcStrict == VINF_SUCCESS)
8333 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8334 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8335 {
8336 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8337 rcStrict = VINF_SUCCESS;
8338 }
8339 else
8340 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
8341 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8342
8343 return rcStrict;
8344}
8345
8346
8347/**
8348 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
8349 */
8350HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8351{
8352 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8353
8354 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8355
8356 /*
8357 * The FS and GS base MSRs are not part of the above all-MSRs mask.
8358 * Although we don't need to fetch the base as it will be overwritten shortly, when
8359 * loading the guest state we would also load the entire segment register, including the
8360 * limit and attributes, and thus we need to import them here.
8361 */
8362 /** @todo Optimize this: We currently drag in the whole MSR state
8363 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8364 * the MSRs required. That would require changes to IEM and possibly CPUM too.
8365 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8366 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8367 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8368 int rc;
8369 switch (idMsr)
8370 {
8371 default:
8372 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8373 __FUNCTION__);
8374 AssertRCReturn(rc, rc);
8375 break;
8376
8377 case MSR_K8_FS_BASE:
8378 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8379 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8380 AssertRCReturn(rc, rc);
8381 break;
8382 case MSR_K8_GS_BASE:
8383 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8384 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8385 AssertRCReturn(rc, rc);
8386 break;
8387 }
8388 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
8389
8390 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
8391 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
8392
8393 if (rcStrict == VINF_SUCCESS)
8394 {
8395 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8396
8397 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
8398 if ( idMsr == MSR_IA32_APICBASE
8399 || ( idMsr >= MSR_IA32_X2APIC_START
8400 && idMsr <= MSR_IA32_X2APIC_END))
8401 {
8402 /*
8403 * We've already saved the APIC related guest-state (TPR) in post-run phase.
8404 * When full APIC register virtualization is implemented we'll have to make
8405 * sure APIC state is saved from the VMCS before IEM changes it.
8406 */
8407 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
8408 }
8409 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
8410 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8411 else if (idMsr == MSR_K6_EFER)
8412 {
8413 /*
8414 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
8415 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
8416 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
8417 */
8418 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
8419 }
8420
8421 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
8422 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
8423 {
8424 switch (idMsr)
8425 {
8426 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
8427 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
8428 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
8429 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
8430 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
8431 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
8432 default:
8433 {
8434#ifndef IN_NEM_DARWIN
8435 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8436 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
8437 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8438 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
8439#else
8440 AssertMsgFailed(("TODO\n"));
8441#endif
8442 break;
8443 }
8444 }
8445 }
8446#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8447 else
8448 {
8449 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
8450 switch (idMsr)
8451 {
8452 case MSR_IA32_SYSENTER_CS:
8453 case MSR_IA32_SYSENTER_EIP:
8454 case MSR_IA32_SYSENTER_ESP:
8455 case MSR_K8_FS_BASE:
8456 case MSR_K8_GS_BASE:
8457 {
8458 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
8459 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8460 }
8461
8462 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
8463 default:
8464 {
8465 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8466 {
8467 /* EFER MSR writes are always intercepted. */
8468 if (idMsr != MSR_K6_EFER)
8469 {
8470 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
8471 idMsr));
8472 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8473 }
8474 }
8475
8476 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8477 {
8478 Assert(pVmcsInfo->pvMsrBitmap);
8479 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8480 if (fMsrpm & VMXMSRPM_ALLOW_WR)
8481 {
8482 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
8483 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8484 }
8485 }
8486 break;
8487 }
8488 }
8489 }
8490#endif /* VBOX_STRICT */
8491 }
8492 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8493 {
8494 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8495 rcStrict = VINF_SUCCESS;
8496 }
8497 else
8498 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
8499 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8500
8501 return rcStrict;
8502}
8503
8504
8505/**
8506 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8507 */
8508HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8509{
8510 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8511
 8512 /** @todo The guest has likely hit a contended spinlock. We might want to
 8513 * poke or schedule a different guest VCPU. */
8514 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8515 if (RT_SUCCESS(rc))
8516 return VINF_EM_RAW_INTERRUPT;
8517
8518 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8519 return rc;
8520}
8521
8522
8523/**
8524 * VM-exit handler for when the TPR value is lowered below the specified
8525 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8526 */
8527HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8528{
8529 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8530 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8531
8532 /*
8533 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8534 * We'll re-evaluate pending interrupts and inject them before the next VM
8535 * entry so we can just continue execution here.
8536 */
8537 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8538 return VINF_SUCCESS;
8539}
8540
8541
8542/**
8543 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8544 * VM-exit.
8545 *
8546 * @retval VINF_SUCCESS when guest execution can continue.
8547 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8548 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8549 * incompatible guest state for VMX execution (real-on-v86 case).
8550 */
8551HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8552{
8553 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8554 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8555
8556 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8557 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8558 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8559
8560 VBOXSTRICTRC rcStrict;
8561 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8562 uint64_t const uExitQual = pVmxTransient->uExitQual;
8563 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
8564 switch (uAccessType)
8565 {
8566 /*
8567 * MOV to CRx.
8568 */
8569 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8570 {
8571 /*
8572 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
 8573 * changes certain bits in CR0 and CR4 (and not just CR3). We are currently fine
8574 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8575 * PAE PDPTEs as well.
8576 */
8577 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8578 AssertRCReturn(rc, rc);
8579
8580 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8581#ifndef IN_NEM_DARWIN
8582 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8583#endif
8584 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8585 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8586
8587 /*
 8588 * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8589 * - When nested paging isn't used.
8590 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8591 * - We are executing in the VM debug loop.
8592 */
8593#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8594# ifndef IN_NEM_DARWIN
8595 Assert( iCrReg != 3
8596 || !VM_IS_VMX_NESTED_PAGING(pVM)
8597 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8598 || pVCpu->hmr0.s.fUsingDebugLoop);
8599# else
8600 Assert( iCrReg != 3
8601 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8602# endif
8603#endif
8604
8605 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8606 Assert( iCrReg != 8
8607 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8608
8609 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8610 AssertMsg( rcStrict == VINF_SUCCESS
8611 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8612
8613#ifndef IN_NEM_DARWIN
8614 /*
8615 * This is a kludge for handling switches back to real mode when we try to use
 8616 * V86 mode to run real-mode code directly. The problem is that V86 mode cannot
 8617 * deal with special selector values, so we have to return to ring-3 and run
 8618 * there until the selector values are V86-mode compatible.
8619 *
8620 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8621 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8622 * this function.
8623 */
8624 if ( iCrReg == 0
8625 && rcStrict == VINF_SUCCESS
8626 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8627 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8628 && (uOldCr0 & X86_CR0_PE)
8629 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8630 {
8631 /** @todo Check selectors rather than returning all the time. */
8632 Assert(!pVmxTransient->fIsNestedGuest);
8633 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8634 rcStrict = VINF_EM_RESCHEDULE_REM;
8635 }
8636#endif
8637
8638 break;
8639 }
8640
8641 /*
8642 * MOV from CRx.
8643 */
8644 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8645 {
8646 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8647 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8648
8649 /*
 8650 * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8651 * - When nested paging isn't used.
8652 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8653 * - We are executing in the VM debug loop.
8654 */
8655#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8656# ifndef IN_NEM_DARWIN
8657 Assert( iCrReg != 3
8658 || !VM_IS_VMX_NESTED_PAGING(pVM)
8659 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8660 || pVCpu->hmr0.s.fLeaveDone);
8661# else
8662 Assert( iCrReg != 3
8663 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8664# endif
8665#endif
8666
8667 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8668 Assert( iCrReg != 8
8669 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8670
8671 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8672 break;
8673 }
8674
8675 /*
8676 * CLTS (Clear Task-Switch Flag in CR0).
8677 */
8678 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8679 {
8680 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8681 break;
8682 }
8683
8684 /*
8685 * LMSW (Load Machine-Status Word into CR0).
8686 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8687 */
8688 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8689 {
8690 RTGCPTR GCPtrEffDst;
8691 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8692 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
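            /* Note! Architecturally LMSW only loads the low four bits (PE, MP, EM, TS) of the
                     operand into CR0 and cannot clear PE once it has been set. */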
8693 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8694 if (fMemOperand)
8695 {
8696 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
8697 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8698 }
8699 else
8700 GCPtrEffDst = NIL_RTGCPTR;
8701 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8702 break;
8703 }
8704
8705 default:
8706 {
8707 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8708 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8709 }
8710 }
8711
8712 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8713 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8714 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8715
8716 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8717 NOREF(pVM);
8718 return rcStrict;
8719}
8720
8721
8722/**
8723 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8724 * VM-exit.
8725 */
8726HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8727{
8728 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8729 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8730
8731 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8732 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8733 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8734 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8735#define VMX_HC_EXIT_IO_INSTR_INITIAL_REGS (IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER)
 8736 /* The EFER MSR is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8737 int rc = vmxHCImportGuestState<VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8738 AssertRCReturn(rc, rc);
8739
8740 /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
8741 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8742 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8743 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8744 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8745 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8746 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
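    /* Note! The exit-qualification size field encodes 0 = 1 byte, 1 = 2 bytes and 3 = 4 bytes;
             2 is not a valid encoding (see the s_aIOSizes/s_aIOOpAnd tables below). */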
8747 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8748
8749 /*
8750 * Update exit history to see if this exit can be optimized.
8751 */
8752 VBOXSTRICTRC rcStrict;
8753 PCEMEXITREC pExitRec = NULL;
8754 if ( !fGstStepping
8755 && !fDbgStepping)
8756 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8757 !fIOString
8758 ? !fIOWrite
8759 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8760 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8761 : !fIOWrite
8762 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8763 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8764 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8765 if (!pExitRec)
8766 {
8767 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8768 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
8769
8770 uint32_t const cbValue = s_aIOSizes[uIOSize];
8771 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8772 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8773 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8774 if (fIOString)
8775 {
8776 /*
8777 * INS/OUTS - I/O String instruction.
8778 *
8779 * Use instruction-information if available, otherwise fall back on
8780 * interpreting the instruction.
8781 */
8782 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8783 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
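            /* The IA32_VMX_BASIC MSR advertises whether VM-exit instruction-information is provided
               for INS/OUTS; when it is, we can avoid re-decoding the instruction. */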
8784 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8785 if (fInsOutsInfo)
8786 {
8787 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
8788 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8789 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8790 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8791 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8792 if (fIOWrite)
8793 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8794 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8795 else
8796 {
8797 /*
8798 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8799 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8800 * See Intel Instruction spec. for "INS".
8801 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8802 */
8803 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8804 }
8805 }
8806 else
8807 rcStrict = IEMExecOne(pVCpu);
8808
8809 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8810 fUpdateRipAlready = true;
8811 }
8812 else
8813 {
8814 /*
8815 * IN/OUT - I/O instruction.
8816 */
8817 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8818 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8819 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8820 if (fIOWrite)
8821 {
8822 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8823 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8824#ifndef IN_NEM_DARWIN
8825 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8826 && !pCtx->eflags.Bits.u1TF)
8827 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8828#endif
8829 }
8830 else
8831 {
8832 uint32_t u32Result = 0;
8833 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8834 if (IOM_SUCCESS(rcStrict))
8835 {
8836 /* Save result of I/O IN instr. in AL/AX/EAX. */
8837 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8838 }
8839#ifndef IN_NEM_DARWIN
8840 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8841 && !pCtx->eflags.Bits.u1TF)
8842 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8843#endif
8844 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8845 }
8846 }
8847
8848 if (IOM_SUCCESS(rcStrict))
8849 {
8850 if (!fUpdateRipAlready)
8851 {
8852 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8853 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8854 }
8855
8856 /*
 8857 * INS/OUTS with a REP prefix updates RFLAGS; not flagging this was observed as a
 8858 * triple-fault guru meditation while booting a Fedora 17 64-bit guest.
8859 *
8860 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8861 */
8862 if (fIOString)
8863 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8864
8865 /*
8866 * If any I/O breakpoints are armed, we need to check if one triggered
8867 * and take appropriate action.
8868 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8869 */
8870#if 1
8871 AssertCompile(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7);
8872#else
8873 AssertCompile(!(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7));
8874 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo);
8875 AssertRCReturn(rc, rc);
8876#endif
8877
8878 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8879 * execution engines about whether hyper BPs and such are pending. */
8880 uint32_t const uDr7 = pCtx->dr[7];
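            /* Consider both guest I/O breakpoints armed in DR7 and any hardware I/O breakpoints
               armed by the debugger via DBGF. */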
8881 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8882 && X86_DR7_ANY_RW_IO(uDr7)
8883 && (pCtx->cr4 & X86_CR4_DE))
8884 || DBGFBpIsHwIoArmed(pVM)))
8885 {
8886 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8887
8888#ifndef IN_NEM_DARWIN
8889 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8890 VMMRZCallRing3Disable(pVCpu);
8891 HM_DISABLE_PREEMPT(pVCpu);
8892
8893 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8894
8895 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8896 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8897 {
8898 /* Raise #DB. */
8899 if (fIsGuestDbgActive)
8900 ASMSetDR6(pCtx->dr[6]);
8901 if (pCtx->dr[7] != uDr7)
8902 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8903
8904 vmxHCSetPendingXcptDB(pVCpu);
8905 }
8906 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8907 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8908 else if ( rcStrict2 != VINF_SUCCESS
8909 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8910 rcStrict = rcStrict2;
8911 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8912
8913 HM_RESTORE_PREEMPT();
8914 VMMRZCallRing3Enable(pVCpu);
8915#else
8916 /** @todo */
8917#endif
8918 }
8919 }
8920
8921#ifdef VBOX_STRICT
8922 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8923 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8924 Assert(!fIOWrite);
8925 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8926 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8927 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8928 Assert(fIOWrite);
8929 else
8930 {
8931# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
 8932 * statuses that the VMM device and some others may return. See
8933 * IOM_SUCCESS() for guidance. */
8934 AssertMsg( RT_FAILURE(rcStrict)
8935 || rcStrict == VINF_SUCCESS
8936 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8937 || rcStrict == VINF_EM_DBG_BREAKPOINT
8938 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8939 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8940# endif
8941 }
8942#endif
8943 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8944 }
8945 else
8946 {
8947 /*
8948 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8949 */
8950 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
8951 VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8952 AssertRCReturn(rc2, rc2);
8953 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8954 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8955 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8956 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8957 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8958 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8959
8960 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8961 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8962
8963 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8964 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8965 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8966 }
8967 return rcStrict;
8968}
8969
8970
8971/**
8972 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8973 * VM-exit.
8974 */
8975HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8976{
8977 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8978
 8979 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8980 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
8981 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
8982 {
8983 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_INFO>(pVCpu, pVmxTransient);
8984 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
8985 {
8986 uint32_t uErrCode;
8987 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
8988 {
8989 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
8990 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
8991 }
8992 else
8993 uErrCode = 0;
8994
8995 RTGCUINTPTR GCPtrFaultAddress;
8996 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
8997 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
8998 else
8999 GCPtrFaultAddress = 0;
9000
9001 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9002
9003 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
9004 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
9005
9006 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
9007 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
9008 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9009 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9010 }
9011 }
9012
9013 /* Fall back to the interpreter to emulate the task-switch. */
9014 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9015 return VERR_EM_INTERPRETER;
9016}
9017
9018
9019/**
9020 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
9021 */
9022HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9023{
9024 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9025
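    /* The monitor-trap-flag was armed by us (typically for single-stepping); clear it in the
       VMCS and hand control back to the debug loop. */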
9026 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9027 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
9028 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9029 AssertRC(rc);
9030 return VINF_EM_DBG_STEPPED;
9031}
9032
9033
9034/**
9035 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
9036 */
9037HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9038{
9039 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9040 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
9041
9042 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9043 | HMVMX_READ_EXIT_INSTR_LEN
9044 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9045 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9046 | HMVMX_READ_IDT_VECTORING_INFO
9047 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9048
9049 /*
9050 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9051 */
9052 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9053 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9054 {
 9055 /* If, for some crazy guest, delivering an event causes an APIC-access VM-exit, fall back to instruction emulation. */
9056 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
9057 {
9058 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9059 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9060 }
9061 }
9062 else
9063 {
9064 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9065 return rcStrict;
9066 }
9067
9068 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
9069 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9070 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9071 AssertRCReturn(rc, rc);
9072
 9073 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
9074 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
9075 switch (uAccessType)
9076 {
9077#ifndef IN_NEM_DARWIN
9078 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
9079 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
9080 {
9081 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
9082 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
9083 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
9084
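            /* Translate the access into a guest-physical address: the page-aligned APIC base plus
               the offset from the exit qualification, then let IOM handle it as MMIO. */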
9085 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
9086 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
9087 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
9088 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
9089 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
9090
9091 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
9092 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
9093 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9094 if ( rcStrict == VINF_SUCCESS
9095 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9096 || rcStrict == VERR_PAGE_NOT_PRESENT)
9097 {
9098 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9099 | HM_CHANGED_GUEST_APIC_TPR);
9100 rcStrict = VINF_SUCCESS;
9101 }
9102 break;
9103 }
9104#else
9105 /** @todo */
9106#endif
9107
9108 default:
9109 {
9110 Log4Func(("uAccessType=%#x\n", uAccessType));
9111 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
9112 break;
9113 }
9114 }
9115
9116 if (rcStrict != VINF_SUCCESS)
9117 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
9118 return rcStrict;
9119}
9120
9121
9122/**
9123 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
9124 * VM-exit.
9125 */
9126HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9127{
9128 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9129 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9130
9131 /*
9132 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
9133 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
9134 * must emulate the MOV DRx access.
9135 */
9136 if (!pVmxTransient->fIsNestedGuest)
9137 {
9138 /* We should -not- get this VM-exit if the guest's debug registers were active. */
9139 if (pVmxTransient->fWasGuestDebugStateActive)
9140 {
9141 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
9142 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
9143 }
9144
9145 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
9146 && !pVmxTransient->fWasHyperDebugStateActive)
9147 {
9148 Assert(!DBGFIsStepping(pVCpu));
9149 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
9150
9151 /* Don't intercept MOV DRx any more. */
9152 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
9153 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9154 AssertRC(rc);
9155
9156#ifndef IN_NEM_DARWIN
9157 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
9158 VMMRZCallRing3Disable(pVCpu);
9159 HM_DISABLE_PREEMPT(pVCpu);
9160
9161 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
9162 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
9163 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9164
9165 HM_RESTORE_PREEMPT();
9166 VMMRZCallRing3Enable(pVCpu);
9167#else
9168 CPUMR3NemActivateGuestDebugState(pVCpu);
9169 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9170 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
9171#endif
9172
9173#ifdef VBOX_WITH_STATISTICS
9174 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9175 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9176 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9177 else
9178 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9179#endif
9180 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
9181 return VINF_SUCCESS;
9182 }
9183 }
9184
9185 /*
9186 * Import state. We must have DR7 loaded here as it's always consulted,
9187 * both for reading and writing. The other debug registers are never
9188 * exported as such.
9189 */
9190 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9191 int rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
9192 | CPUMCTX_EXTRN_GPRS_MASK
9193 | CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo, __FUNCTION__);
9194 AssertRCReturn(rc, rc);
9195 Log4Func(("cs:rip=%#04x:%08RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9196
9197 uint8_t const iGReg = VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual);
9198 uint8_t const iDrReg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
9199
9200 VBOXSTRICTRC rcStrict;
9201 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9202 {
9203 /*
9204 * Write DRx register.
9205 */
9206 rcStrict = IEMExecDecodedMovDRxWrite(pVCpu, pVmxTransient->cbExitInstr, iDrReg, iGReg);
9207 AssertMsg( rcStrict == VINF_SUCCESS
9208 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9209
9210 if (rcStrict == VINF_SUCCESS)
9211 /** @todo r=bird: Not sure why we always flag DR7 as modified here, but I've
9212 * kept it for now to avoid breaking something non-obvious. */
9213 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9214 | HM_CHANGED_GUEST_DR7);
9215 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9216 {
9217 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9218 rcStrict = VINF_SUCCESS;
9219 }
9220
9221 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9222 }
9223 else
9224 {
9225 /*
9226 * Read DRx register into a general purpose register.
9227 */
9228 rcStrict = IEMExecDecodedMovDRxRead(pVCpu, pVmxTransient->cbExitInstr, iGReg, iDrReg);
9229 AssertMsg( rcStrict == VINF_SUCCESS
9230 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9231
9232 if (rcStrict == VINF_SUCCESS)
9233 {
9234 if (iGReg == X86_GREG_xSP)
9235 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9236 | HM_CHANGED_GUEST_RSP);
9237 else
9238 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9239 }
9240 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9241 {
9242 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9243 rcStrict = VINF_SUCCESS;
9244 }
9245
9246 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9247 }
9248
9249 return rcStrict;
9250}
9251
9252
9253/**
9254 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
9255 * Conditional VM-exit.
9256 */
9257HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9258{
9259 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9260
9261#ifndef IN_NEM_DARWIN
9262 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9263
9264 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_LEN
9265 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9266 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9267 | HMVMX_READ_IDT_VECTORING_INFO
9268 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9269 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9270
9271 /*
9272 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9273 */
9274 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9275 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9276 {
9277 /*
9278 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
9279 * instruction emulation to inject the original event. Otherwise, injecting the original event
9280 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
9281 */
9282 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9283 { /* likely */ }
9284 else
9285 {
9286 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9287# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9288 /** @todo NSTVMX: Think about how this should be handled. */
9289 if (pVmxTransient->fIsNestedGuest)
9290 return VERR_VMX_IPE_3;
9291# endif
9292 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9293 }
9294 }
9295 else
9296 {
9297 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9298 return rcStrict;
9299 }
9300
9301 /*
9302 * Get sufficient state and update the exit history entry.
9303 */
9304 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9305 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9306 AssertRCReturn(rc, rc);
9307
9308 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9309 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
9310 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
9311 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
9312 if (!pExitRec)
9313 {
9314 /*
9315 * If we succeed, resume guest execution.
9316 * If we fail in interpreting the instruction because we couldn't get the guest physical address
9317 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
9318 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
9319 * weird case. See @bugref{6043}.
9320 */
9321 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9322/** @todo bird: We can probably just go straight to IOM here and assume that
9323 * it's MMIO, then fall back on PGM if that hunch didn't work out so
 9324 * well. However, we need to address the aliasing workarounds that
9325 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
9326 *
9327 * Might also be interesting to see if we can get this done more or
 9328 * less locklessly inside IOM. Need to consider the lookup table
 9329 * updating and its use a bit more carefully first (or do all updates via
9330 * rendezvous) */
9331 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, &pVCpu->cpum.GstCtx, GCPhys, UINT32_MAX);
9332 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
9333 if ( rcStrict == VINF_SUCCESS
9334 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9335 || rcStrict == VERR_PAGE_NOT_PRESENT)
9336 {
9337 /* Successfully handled MMIO operation. */
9338 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9339 | HM_CHANGED_GUEST_APIC_TPR);
9340 rcStrict = VINF_SUCCESS;
9341 }
9342 }
9343 else
9344 {
9345 /*
9346 * Frequent exit or something needing probing. Call EMHistoryExec.
9347 */
9348 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
9349 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
9350
9351 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9352 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9353
9354 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9355 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9356 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9357 }
9358 return rcStrict;
9359#else
9360 AssertFailed();
9361 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
9362#endif
9363}
9364
9365
9366/**
9367 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
9368 * VM-exit.
9369 */
9370HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9371{
9372 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9373#ifndef IN_NEM_DARWIN
9374 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9375
9376 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9377 | HMVMX_READ_EXIT_INSTR_LEN
9378 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9379 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9380 | HMVMX_READ_IDT_VECTORING_INFO
9381 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9382 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9383
9384 /*
9385 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9386 */
9387 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9388 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9389 {
9390 /*
9391 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
9392 * we shall resolve the nested #PF and re-inject the original event.
9393 */
9394 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9395 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
9396 }
9397 else
9398 {
9399 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9400 return rcStrict;
9401 }
9402
9403 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9404 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9405 AssertRCReturn(rc, rc);
9406
9407 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9408 uint64_t const uExitQual = pVmxTransient->uExitQual;
9409 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
9410
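    /* Construct a #PF-style error code from the EPT violation qualification: ID for instruction
       fetches, RW for writes, and P when the EPT entry granted any kind of access. */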
9411 RTGCUINT uErrorCode = 0;
9412 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
9413 uErrorCode |= X86_TRAP_PF_ID;
9414 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9415 uErrorCode |= X86_TRAP_PF_RW;
9416 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
9417 uErrorCode |= X86_TRAP_PF_P;
9418
9419 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9420 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
9421
9422 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9423
9424 /*
9425 * Handle the pagefault trap for the nested shadow table.
9426 */
9427 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
9428 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, pCtx, GCPhys);
9429 TRPMResetTrap(pVCpu);
9430
9431 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
9432 if ( rcStrict == VINF_SUCCESS
9433 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9434 || rcStrict == VERR_PAGE_NOT_PRESENT)
9435 {
9436 /* Successfully synced our nested page tables. */
9437 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
9438 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
9439 return VINF_SUCCESS;
9440 }
9441 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9442 return rcStrict;
9443
9444#else /* IN_NEM_DARWIN */
9445 PVM pVM = pVCpu->CTX_SUFF(pVM);
9446 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
9447 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9448 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9449 vmxHCImportGuestRip(pVCpu);
9450 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
9451
9452 /*
9453 * Ask PGM for information about the given GCPhys. We need to check if we're
9454 * out of sync first.
9455 */
9456 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE),
9457 false,
9458 false };
9459 PGMPHYSNEMPAGEINFO Info;
9460 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
9461 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
9462 if (RT_SUCCESS(rc))
9463 {
9464 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9465 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
9466 {
9467 if (State.fCanResume)
9468 {
9469 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
9470 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9471 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9472 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9473 State.fDidSomething ? "" : " no-change"));
9474 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
9475 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9476 return VINF_SUCCESS;
9477 }
9478 }
9479
9480 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
9481 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9482 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9483 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9484 State.fDidSomething ? "" : " no-change"));
9485 }
9486 else
9487 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
9488 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9489 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
9490
9491 /*
9492 * Emulate the memory access, either access handler or special memory.
9493 */
9494 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
9495 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9496 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
9497 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
9498 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9499
9500 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9501 AssertRCReturn(rc, rc);
9502
9503 VBOXSTRICTRC rcStrict;
9504 if (!pExitRec)
9505 rcStrict = IEMExecOne(pVCpu);
9506 else
9507 {
9508 /* Frequent access or probing. */
9509 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9510 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9511 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9512 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9513 }
9514
9515 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9516
9517 Log4Func(("EPT return rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9518 return rcStrict;
9519#endif /* IN_NEM_DARWIN */
9520}
9521
9522#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9523
9524/**
9525 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
9526 */
9527HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9528{
9529 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9530
9531 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9532 | HMVMX_READ_EXIT_INSTR_INFO
9533 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
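    /* RSP and the segment registers are needed to decode the memory operand below; HWVIRT
       covers the virtual VMX state IEM operates on. */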
9534 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9535 | CPUMCTX_EXTRN_SREG_MASK
9536 | CPUMCTX_EXTRN_HWVIRT
9537 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9538 AssertRCReturn(rc, rc);
9539
9540 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9541
9542 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9543 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9544
9545 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
9546 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9547 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9548 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9549 {
9550 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9551 rcStrict = VINF_SUCCESS;
9552 }
9553 return rcStrict;
9554}
9555
9556
9557/**
9558 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9559 */
9560HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9561{
9562 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9563
9564 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
9565 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9566 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9567 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9568 AssertRCReturn(rc, rc);
9569
9570 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9571
9572 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9573 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9574 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9575 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9576 {
9577 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9578 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9579 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9580 }
9581 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9582 return rcStrict;
9583}
9584
9585
9586/**
9587 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9588 */
9589HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9590{
9591 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9592
9593 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9594 | HMVMX_READ_EXIT_INSTR_INFO
9595 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9596 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9597 | CPUMCTX_EXTRN_SREG_MASK
9598 | CPUMCTX_EXTRN_HWVIRT
9599 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9600 AssertRCReturn(rc, rc);
9601
9602 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9603
9604 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9605 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9606
9607 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9608 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9609 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9610 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9611 {
9612 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9613 rcStrict = VINF_SUCCESS;
9614 }
9615 return rcStrict;
9616}
9617
9618
9619/**
9620 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9621 */
9622HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9623{
9624 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9625
9626 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9627 | HMVMX_READ_EXIT_INSTR_INFO
9628 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9629 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9630 | CPUMCTX_EXTRN_SREG_MASK
9631 | CPUMCTX_EXTRN_HWVIRT
9632 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9633 AssertRCReturn(rc, rc);
9634
9635 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9636
9637 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9638 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9639
9640 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9641 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9642 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9643 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9644 {
9645 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9646 rcStrict = VINF_SUCCESS;
9647 }
9648 return rcStrict;
9649}
9650
9651
9652/**
9653 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9654 */
9655HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9656{
9657 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9658
9659 /*
9660 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
 9661 * thus might not need to import the shadow VMCS state, but it's safer to do so in
 9662 * case code elsewhere dares look at unsynced VMCS fields.
9663 */
9664 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9665 | HMVMX_READ_EXIT_INSTR_INFO
9666 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9667 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9668 | CPUMCTX_EXTRN_SREG_MASK
9669 | CPUMCTX_EXTRN_HWVIRT
9670 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9671 AssertRCReturn(rc, rc);
9672
9673 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9674
9675 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9676 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9677 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9678
9679 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9680 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9681 {
9682 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9683
9684# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
9685 /* Try for exit optimization. This is on the following instruction
9686 because it would be a waste of time to have to reinterpret the
 9687 already decoded vmread instruction. */
9688 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
9689 if (pExitRec)
9690 {
9691 /* Frequent access or probing. */
9692 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9693 AssertRCReturn(rc, rc);
9694
9695 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9696 Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9697 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9698 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9699 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9700 }
9701# endif
9702 }
9703 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9704 {
9705 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9706 rcStrict = VINF_SUCCESS;
9707 }
9708 return rcStrict;
9709}
9710
9711
9712/**
9713 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9714 */
9715HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9716{
9717 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9718
9719 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
9720 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9721 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9722 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9723 AssertRCReturn(rc, rc);
9724
9725 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9726
9727 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9728 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9729 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9730 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9731 {
9732 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9733 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9734 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9735 }
9736 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9737 return rcStrict;
9738}
9739
9740
9741/**
9742 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9743 */
9744HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9745{
9746 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9747
9748 /*
 9749 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, our HM hook
 9750 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and
 9751 * flags re-loading the entire shadow VMCS, so we should save the entire shadow VMCS here.
9752 */
9753 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9754 | HMVMX_READ_EXIT_INSTR_INFO
9755 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9756 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9757 | CPUMCTX_EXTRN_SREG_MASK
9758 | CPUMCTX_EXTRN_HWVIRT
9759 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9760 AssertRCReturn(rc, rc);
9761
9762 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9763
9764 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9765 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9766 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9767
9768 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9769 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9770 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9771 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9772 {
9773 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9774 rcStrict = VINF_SUCCESS;
9775 }
9776 return rcStrict;
9777}
9778
9779
9780/**
9781 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9782 */
9783HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9784{
9785 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9786
9787 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9788 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
9789 | CPUMCTX_EXTRN_HWVIRT
9790 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9791 AssertRCReturn(rc, rc);
9792
9793 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9794
9795 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9796 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9797 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9798 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9799 {
9800 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9801 rcStrict = VINF_SUCCESS;
9802 }
9803 return rcStrict;
9804}
9805
9806
9807/**
9808 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9809 */
9810HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9811{
9812 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9813
9814 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9815 | HMVMX_READ_EXIT_INSTR_INFO
9816 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9817 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9818 | CPUMCTX_EXTRN_SREG_MASK
9819 | CPUMCTX_EXTRN_HWVIRT
9820 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9821 AssertRCReturn(rc, rc);
9822
9823 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9824
9825 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9826 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9827
9828 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9829 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9830 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9831 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9832 {
9833 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9834 rcStrict = VINF_SUCCESS;
9835 }
9836 return rcStrict;
9837}
9838
9839
9840/**
9841 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9842 */
9843HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9844{
9845 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9846
9847 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9848 | HMVMX_READ_EXIT_INSTR_INFO
9849 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9850 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9851 | CPUMCTX_EXTRN_SREG_MASK
9852 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9853 AssertRCReturn(rc, rc);
9854
9855 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9856
9857 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9858 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9859
9860 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9861 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9862 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9863 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9864 {
9865 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9866 rcStrict = VINF_SUCCESS;
9867 }
9868 return rcStrict;
9869}
9870
9871
9872# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9873/**
9874 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9875 */
9876HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9877{
9878 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9879
9880 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9881 | HMVMX_READ_EXIT_INSTR_INFO
9882 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9883 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9884 | CPUMCTX_EXTRN_SREG_MASK
9885 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9886 AssertRCReturn(rc, rc);
9887
9888 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9889
9890 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9891 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9892
9893 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9894 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9895 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9896 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9897 {
9898 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9899 rcStrict = VINF_SUCCESS;
9900 }
9901 return rcStrict;
9902}
9903# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9904#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9905/** @} */
9906
9907
9908#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9909/** @name Nested-guest VM-exit handlers.
9910 * @{
9911 */
9912/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9913/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9914/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9915
9916/**
9917 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9918 * Conditional VM-exit.
9919 */
9920HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9921{
9922 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9923
9924 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
9925
9926 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9927 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9928 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9929
9930 switch (uExitIntType)
9931 {
9932# ifndef IN_NEM_DARWIN
9933 /*
9934 * Physical NMIs:
9935 * We shouldn't direct host physical NMIs to the nested-guest. Dispatch them to the host.
9936 */
9937 case VMX_EXIT_INT_INFO_TYPE_NMI:
9938 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9939# endif
9940
9941 /*
9942 * Hardware exceptions,
9943 * Software exceptions,
9944 * Privileged software exceptions:
9945 * Figure out if the exception must be delivered to the guest or the nested-guest.
9946 */
9947 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9948 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9949 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9950 {
9951 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9952 | HMVMX_READ_EXIT_INSTR_LEN
9953 | HMVMX_READ_IDT_VECTORING_INFO
9954 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9955
9956 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9957 if (CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo), pVmxTransient->uExitIntErrorCode))
9958 {
9959 /* Exit qualification is required for debug and page-fault exceptions. */
9960 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9961
9962 /*
9963 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9964 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9965 * length. However, if delivery of a software interrupt, software exception or privileged
9966 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
9967 */
9968 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9969 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT(pVmxTransient->uExitIntInfo,
9970 pVmxTransient->uExitIntErrorCode,
9971 pVmxTransient->uIdtVectoringInfo,
9972 pVmxTransient->uIdtVectoringErrorCode);
9973#ifdef DEBUG_ramshankar
9974 vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9975 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n",
9976 pVmxTransient->uExitIntInfo, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
9977 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9978 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n",
9979 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
9980#endif
9981 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
9982 }
9983
9984 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
9985 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9986 return vmxHCExitXcpt(pVCpu, pVmxTransient);
9987 }
9988
9989 /*
9990 * Software interrupts:
9991 * VM-exits cannot be caused by software interrupts.
9992 *
9993 * External interrupts:
9994 * This should only happen when "acknowledge external interrupts on VM-exit"
9995 * control is set. However, we never set this when executing a guest or
9996 * nested-guest. For nested-guests it is emulated while injecting interrupts into
9997 * the guest.
9998 */
9999 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10000 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
10001 default:
10002 {
10003 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
10004 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
10005 }
10006 }
10007}
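/*
 * Summary of the dispatch above: when the nested-guest's controls intercept the vector
 * (its exception bitmap and, for #PF, the page-fault error-code mask/match controls
 * defined by the VMX architecture), the exit is reflected to the nested-guest via
 * IEMExecVmxVmexitXcpt; otherwise vmxHCExitXcpt handles the exception on behalf of
 * the outer guest.
 */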
10008
10009
10010/**
10011 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
10012 * Unconditional VM-exit.
10013 */
10014HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10015{
10016 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10017 return IEMExecVmxVmexitTripleFault(pVCpu);
10018}
10019
10020
10021/**
10022 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
10023 */
10024HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10025{
10026 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10027
10028 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
10029 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10030 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
10031}
10032
10033
10034/**
10035 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
10036 */
10037HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10038{
10039 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10040
10041 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
10042 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10043 return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
10044}
10045
10046
10047/**
10048 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
10049 * Unconditional VM-exit.
10050 */
10051HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10052{
10053 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10054
10055 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10056 | HMVMX_READ_EXIT_INSTR_LEN
10057 | HMVMX_READ_IDT_VECTORING_INFO
10058 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10059
10060 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10061 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10062 pVmxTransient->uIdtVectoringErrorCode);
10063 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
10064}
10065
10066
10067/**
10068 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10069 */
10070HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10071{
10072 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10073
10074 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
10075 {
10076 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10077 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10078 }
10079 return vmxHCExitHlt(pVCpu, pVmxTransient);
10080}
10081
10082
10083/**
10084 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10085 */
10086HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10087{
10088 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10089
10090 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10091 {
10092 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10093 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10094 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10095 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10096 }
10097 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
10098}
10099
10100
10101/**
10102 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10103 */
10104HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10105{
10106 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10107
10108 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
10109 {
10110 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10111 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10112 }
10113 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
10114}
10115
10116
10117/**
10118 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
10119 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
10120 */
10121HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10122{
10123 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10124
10125 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
10126 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
10127
10128 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10129
10130 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
10131 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10132 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10133
10134 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
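    /* Outside 64-bit mode the VMREAD/VMWRITE register operand is only 32 bits wide, so just
       the low dword of the register can specify the VMCS field (hence the masking below). */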
10135 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
10136 u64VmcsField &= UINT64_C(0xffffffff);
10137
10138 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
10139 {
10140 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10141 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10142 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10143 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10144 }
10145
10146 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
10147 return vmxHCExitVmread(pVCpu, pVmxTransient);
10148 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
10149}
10150
10151
10152/**
10153 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10154 */
10155HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10156{
10157 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10158
10159 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10160 {
10161 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10162 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10163 }
10164
10165 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
10166}
10167
10168
10169/**
10170 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
10171 * Conditional VM-exit.
10172 */
10173HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10174{
10175 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10176
10177 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10178 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10179
10180 VBOXSTRICTRC rcStrict;
10181 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
10182 switch (uAccessType)
10183 {
10184 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
10185 {
10186 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10187 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10188 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10189 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10190
10191 bool fIntercept;
10192 switch (iCrReg)
10193 {
10194 case 0:
10195 case 4:
10196 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
10197 break;
10198
10199 case 3:
10200 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
10201 break;
10202
10203 case 8:
10204 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
10205 break;
10206
10207 default:
10208 fIntercept = false;
10209 break;
10210 }
10211 if (fIntercept)
10212 {
10213 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10214 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10215 }
10216 else
10217 {
10218 int const rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10219 AssertRCReturn(rc, rc);
10220 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10221 }
10222 break;
10223 }
10224
10225 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
10226 {
10227 /*
10228 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
10229 * CR2 reads do not cause a VM-exit.
10230 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
10231 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
10232 */
10233 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10234 if ( iCrReg == 3
10235 || iCrReg == 8)
10236 {
10237 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
10238 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
10239 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
10240 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
10241 {
10242 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10243 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10244 }
10245 else
10246 {
10247 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10248 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10249 }
10250 }
10251 else
10252 {
10253 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
10254 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
10255 }
10256 break;
10257 }
10258
10259 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
10260 {
10261 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
10262 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
10263 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
10264 if ( (uGstHostMask & X86_CR0_TS)
10265 && (uReadShadow & X86_CR0_TS))
10266 {
10267 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10268 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10269 }
10270 else
10271 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
10272 break;
10273 }
10274
10275 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10276 {
10277 RTGCPTR GCPtrEffDst;
10278 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
10279 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
10280 if (fMemOperand)
10281 {
10282 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10283 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
10284 }
10285 else
10286 GCPtrEffDst = NIL_RTGCPTR;
10287
10288 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
10289 {
10290 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10291 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
10292 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10293 }
10294 else
10295 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
10296 break;
10297 }
10298
10299 default:
10300 {
10301 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
10302 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
10303 }
10304 }
10305
10306 if (rcStrict == VINF_IEM_RAISED_XCPT)
10307 {
10308 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10309 rcStrict = VINF_SUCCESS;
10310 }
10311 return rcStrict;
10312}
10313
10314
10315/**
10316 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
10317 * Conditional VM-exit.
10318 */
10319HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10320{
10321 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10322
10323 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
10324 {
10325 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10326 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10327 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10328 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10329 }
10330 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
10331}
10332
10333
10334/**
10335 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
10336 * Conditional VM-exit.
10337 */
10338HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10339{
10340 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10341
10342 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10343
10344 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
10345 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
10346 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
10347
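    /* Exit-qualification I/O size encoding: 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes; encoding 2 is
       not used by the architecture, hence the 0 entry in the table below (see the assertion above). */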
10348 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
10349 uint8_t const cbAccess = s_aIOSizes[uIOSize];
10350 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
10351 {
10352 /*
10353 * IN/OUT instruction:
10354 * - Provides VM-exit instruction length.
10355 *
10356 * INS/OUTS instruction:
10357 * - Provides VM-exit instruction length.
10358 * - Provides Guest-linear address.
10359 * - Optionally provides VM-exit instruction info (depends on CPU feature).
10360 */
10361 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10362 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10363
10364 /* Make sure we don't use stale/uninitialized VMX-transient info below. */
10365 pVmxTransient->ExitInstrInfo.u = 0;
10366 pVmxTransient->uGuestLinearAddr = 0;
10367
10368 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
10369 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
10370 if (fIOString)
10371 {
10372 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10373 if (fVmxInsOutsInfo)
10374 {
10375 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
10376 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10377 }
10378 }
10379
10380 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
10381 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10382 }
10383 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
10384}
10385
10386
10387/**
10388 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10389 */
10390HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10391{
10392 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10393
10394 uint32_t fMsrpm;
10395 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10396 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10397 else
10398 fMsrpm = VMXMSRPM_EXIT_RD;
10399
10400 if (fMsrpm & VMXMSRPM_EXIT_RD)
10401 {
10402 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10403 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10404 }
10405 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
10406}
10407
10408
10409/**
10410 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10411 */
10412HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10413{
10414 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10415
10416 uint32_t fMsrpm;
10417 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10418 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10419 else
10420 fMsrpm = VMXMSRPM_EXIT_WR;
10421
10422 if (fMsrpm & VMXMSRPM_EXIT_WR)
10423 {
10424 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10425 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10426 }
10427 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
10428}
10429
10430
10431/**
10432 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10433 */
10434HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10435{
10436 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10437
10438 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
10439 {
10440 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10441 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10442 }
10443 return vmxHCExitMwait(pVCpu, pVmxTransient);
10444}
10445
10446
10447/**
10448 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
10449 * VM-exit.
10450 */
10451HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10452{
10453 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10454
10455 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
10456 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10457 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10458 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10459}
10460
10461
10462/**
10463 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10464 */
10465HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10466{
10467 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10468
10469 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
10470 {
10471 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10472 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10473 }
10474 return vmxHCExitMonitor(pVCpu, pVmxTransient);
10475}
10476
10477
10478/**
10479 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10480 */
10481HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10482{
10483 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10484
10485 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10486 * PAUSE when executing a nested-guest? If it does not, we would not need
10487 * to check for the intercepts here. Just call VM-exit... */
10488
10489 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10490 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10491 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10492 {
10493 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10494 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10495 }
10496 return vmxHCExitPause(pVCpu, pVmxTransient);
10497}
10498
10499
10500/**
10501 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10502 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10503 */
10504HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10505{
10506 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10507
10508 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10509 {
10510 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10511 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10512 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10513 }
10514 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10515}
10516
10517
10518/**
10519 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10520 * VM-exit.
10521 */
10522HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10523{
10524 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10525
10526 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10527 | HMVMX_READ_EXIT_INSTR_LEN
10528 | HMVMX_READ_IDT_VECTORING_INFO
10529 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10530
10531 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10532
10533 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10534 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10535
10536 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10537 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10538 pVmxTransient->uIdtVectoringErrorCode);
10539 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10540}
10541
10542
10543/**
10544 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10545 * Conditional VM-exit.
10546 */
10547HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10548{
10549 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10550
10551 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10552 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10553 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10554}
10555
10556
10557/**
10558 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10559 * Conditional VM-exit.
10560 */
10561HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10562{
10563 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10564
10565 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10566 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10567 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10568}
10569
10570
10571/**
10572 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10573 */
10574HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10575{
10576 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10577
10578 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10579 {
10580 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10581 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10582 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10583 }
10584 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10585}
10586
10587
10588/**
10589 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10590 */
10591HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10592{
10593 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10594
10595 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10596 {
10597 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10598 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10599 }
10600 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10601}
10602
10603
10604/**
10605 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10606 */
10607HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10608{
10609 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10610
10611 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10612 {
10613 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10614 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10615 | HMVMX_READ_EXIT_INSTR_INFO
10616 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10617 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10618 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10619 }
10620 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10621}
10622
10623
10624/**
10625 * Nested-guest VM-exit handler for invalid-guest state
10626 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10627 */
10628HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10629{
10630 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10631
10632 /*
10633 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10634 * So if it does happen, it indicates a bug, possibly in the hardware-assisted VMX code.
10635 * Handle it as if the outer guest were in an invalid guest state.
10636 *
10637 * When the fast path is implemented, this should be changed to cause the corresponding
10638 * nested-guest VM-exit.
10639 */
10640 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10641}
10642
10643
10644/**
10645 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10646 * and only provide the instruction length.
10647 *
10648 * Unconditional VM-exit.
10649 */
10650HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10651{
10652 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10653
10654#ifdef VBOX_STRICT
10655 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10656 switch (pVmxTransient->uExitReason)
10657 {
10658 case VMX_EXIT_ENCLS:
10659 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10660 break;
10661
10662 case VMX_EXIT_VMFUNC:
10663 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10664 break;
10665 }
10666#endif
10667
10668 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10669 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10670}
10671
10672
10673/**
10674 * Nested-guest VM-exit handler for instructions that provide instruction length as
10675 * well as more information.
10676 *
10677 * Unconditional VM-exit.
10678 */
10679HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10680{
10681 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10682
10683# ifdef VBOX_STRICT
10684 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10685 switch (pVmxTransient->uExitReason)
10686 {
10687 case VMX_EXIT_GDTR_IDTR_ACCESS:
10688 case VMX_EXIT_LDTR_TR_ACCESS:
10689 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10690 break;
10691
10692 case VMX_EXIT_RDRAND:
10693 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10694 break;
10695
10696 case VMX_EXIT_RDSEED:
10697 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10698 break;
10699
10700 case VMX_EXIT_XSAVES:
10701 case VMX_EXIT_XRSTORS:
10702 /** @todo NSTVMX: Verify XSS-bitmap. */
10703 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10704 break;
10705
10706 case VMX_EXIT_UMWAIT:
10707 case VMX_EXIT_TPAUSE:
10708 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10709 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10710 break;
10711
10712 case VMX_EXIT_LOADIWKEY:
10713 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10714 break;
10715 }
10716# endif
10717
10718 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10719 | HMVMX_READ_EXIT_INSTR_LEN
10720 | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10721 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10722 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10723}
10724
10725# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10726
10727/**
10728 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10729 * Conditional VM-exit.
10730 */
10731HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10732{
10733 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10734 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10735
10736 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10737 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10738 {
10739 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10740 | HMVMX_READ_EXIT_INSTR_LEN
10741 | HMVMX_READ_EXIT_INTERRUPTION_INFO
10742 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10743 | HMVMX_READ_IDT_VECTORING_INFO
10744 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
10745 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10746 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10747 AssertRCReturn(rc, rc);
10748
10749 /*
10750 * If it's our VM-exit, we're responsible for re-injecting any event whose delivery
10751 * might have triggered this VM-exit. If we forward the problem to the inner VMM,
10752 * it is responsible for dealing with the event and we'll clear the recovered event.
10753 */
10754 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10755 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10756 { /*likely*/ }
10757 else
10758 {
10759 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
10760 return rcStrict;
10761 }
10762 uint32_t const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* paranoia. should not inject events below. */
10763
10764 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10765 uint64_t const uExitQual = pVmxTransient->uExitQual;
10766
10767 RTGCPTR GCPtrNestedFault;
10768 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10769 if (fIsLinearAddrValid)
10770 {
10771 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10772 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
10773 }
10774 else
10775 GCPtrNestedFault = 0;
10776
10777 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10778 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10779 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10780 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10781 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10782
10783 PGMPTWALK Walk;
10784 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10785 rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, pCtx, GCPhysNestedFault,
10786 fIsLinearAddrValid, GCPtrNestedFault, &Walk);
10787 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
10788 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
10789 if (RT_SUCCESS(rcStrict))
10790 return rcStrict;
10791
10792 if (fClearEventOnForward)
10793 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
10794
10795 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10796 pVmxTransient->uIdtVectoringErrorCode);
10797 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10798 {
10799 VMXVEXITINFO const ExitInfo
10800 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
10801 pVmxTransient->uExitQual,
10802 pVmxTransient->cbExitInstr,
10803 pVmxTransient->uGuestLinearAddr,
10804 pVmxTransient->uGuestPhysicalAddr);
10805 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10806 }
10807
10808 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10809 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10810 }
10811
10812 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10813}
10814
10815
10816/**
10817 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10818 * Conditional VM-exit.
10819 */
10820HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10821{
10822 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10823 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10824
10825 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10826 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10827 {
10828 vmxHCReadToTransient<HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10829 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10830 AssertRCReturn(rc, rc);
10831
10832 PGMPTWALK Walk;
10833 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10834 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10835 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, pCtx,
10836 GCPhysNestedFault, false /* fIsLinearAddrValid */,
10837 0 /* GCPtrNestedFault */, &Walk);
10838 if (RT_SUCCESS(rcStrict))
10839 {
10840 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
10841 return rcStrict;
10842 }
10843
10844 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
10845 vmxHCReadToTransient< HMVMX_READ_IDT_VECTORING_INFO
10846 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10847
10848 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10849 pVmxTransient->uIdtVectoringErrorCode);
10850 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10851 }
10852
10853 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
10854}
10855
10856# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10857
10858/** @} */
10859#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10860
10861
10862/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10863 * probes.
10864 *
10865 * The following few functions and associated structure contain the bloat
10866 * necessary for providing detailed debug events and DTrace probes as well as
10867 * reliable host-side single stepping. This works on the principle of
10868 * "subclassing" the normal execution loop and workers. We replace the loop
10869 * method completely and override selected helpers to add necessary adjustments
10870 * to their core operation.
10871 *
10872 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10873 * any performance for debug and analysis features.
10874 *
10875 * @{
10876 */
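/*
 * Illustrative sketch (an assumption about usage, not a copy of the actual loop, which
 * lives further down in this file): the helpers below are meant to compose roughly like
 * this; "fStepping" and the "run the guest / handle the VM-exit" step are placeholders.
 *
 *      VMXRUNDBGSTATE DbgState;
 *      vmxHCRunDebugStateInit(pVCpu, pVmxTransient, &DbgState);
 *      vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState);
 *      for (;;)
 *      {
 *          vmxHCPreRunGuestDebugStateApply(pVCpu, pVmxTransient, &DbgState);   (just before running guest code)
 *          rcStrict = ...run the guest, handle the VM-exit, consult DbgState.bmExitsToCheck...;
 *          if (rcStrict != VINF_SUCCESS)
 *              break;
 *          if (   fStepping
 *              && (   pVCpu->cpum.GstCtx.rip    != DbgState.uRipStart
 *                  || pVCpu->cpum.GstCtx.cs.Sel != DbgState.uCsStart))
 *              break;                                                          (we stepped)
 *          vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState);  (pick up DBGF/DTrace changes)
 *      }
 *      rcStrict = vmxHCRunDebugStateRevert(pVCpu, pVmxTransient, &DbgState, rcStrict);
 */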
10877
10878/**
10879 * Transient per-VCPU debug state of VMCS and related info we save/restore in
10880 * the debug run loop.
10881 */
10882typedef struct VMXRUNDBGSTATE
10883{
10884 /** The RIP we started executing at. This is for detecting that we stepped. */
10885 uint64_t uRipStart;
10886 /** The CS we started executing with. */
10887 uint16_t uCsStart;
10888
10889 /** Whether we've actually modified the 1st execution control field. */
10890 bool fModifiedProcCtls : 1;
10891 /** Whether we've actually modified the 2nd execution control field. */
10892 bool fModifiedProcCtls2 : 1;
10893 /** Whether we've actually modified the exception bitmap. */
10894 bool fModifiedXcptBitmap : 1;
10895
10896 /** We desire the CR0 mask to be cleared. */
10897 bool fClearCr0Mask : 1;
10898 /** We desire the CR4 mask to be cleared. */
10899 bool fClearCr4Mask : 1;
10900 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10901 uint32_t fCpe1Extra;
10902 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10903 uint32_t fCpe1Unwanted;
10904 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10905 uint32_t fCpe2Extra;
10906 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10907 uint32_t bmXcptExtra;
10908 /** The sequence number of the Dtrace provider settings the state was
10909 * configured against. */
10910 uint32_t uDtraceSettingsSeqNo;
10911 /** VM-exits to check (one bit per VM-exit). */
10912 uint32_t bmExitsToCheck[3];
10913
10914 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10915 uint32_t fProcCtlsInitial;
10916 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10917 uint32_t fProcCtls2Initial;
10918 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10919 uint32_t bmXcptInitial;
10920} VMXRUNDBGSTATE;
10921AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
10922typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
10923
10924
10925/**
10926 * Initializes the VMXRUNDBGSTATE structure.
10927 *
10928 * @param pVCpu The cross context virtual CPU structure of the
10929 * calling EMT.
10930 * @param pVmxTransient The VMX-transient structure.
10931 * @param pDbgState The debug state to initialize.
10932 */
10933static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10934{
10935 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
10936 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
10937
10938 pDbgState->fModifiedProcCtls = false;
10939 pDbgState->fModifiedProcCtls2 = false;
10940 pDbgState->fModifiedXcptBitmap = false;
10941 pDbgState->fClearCr0Mask = false;
10942 pDbgState->fClearCr4Mask = false;
10943 pDbgState->fCpe1Extra = 0;
10944 pDbgState->fCpe1Unwanted = 0;
10945 pDbgState->fCpe2Extra = 0;
10946 pDbgState->bmXcptExtra = 0;
10947 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
10948 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
10949 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
10950}
10951
10952
10953/**
10954 * Updates the VMCS fields with changes requested by @a pDbgState.
10955 *
10956 * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
10957 * immediately before executing guest code, i.e. when interrupts are disabled.
10958 * We don't check status codes here as we cannot easily assert or return in the
10959 * latter case.
10960 *
10961 * @param pVCpu The cross context virtual CPU structure.
10962 * @param pVmxTransient The VMX-transient structure.
10963 * @param pDbgState The debug state.
10964 */
10965static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10966{
10967 /*
10968 * Ensure desired flags in VMCS control fields are set.
10969 * (Ignoring write failure here, as we're committed and it's just debug extras.)
10970 *
10971 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
10972 * there should be no stale data in pCtx at this point.
10973 */
10974 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10975 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
10976 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
10977 {
10978 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
10979 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
10980 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
10981 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
10982 pDbgState->fModifiedProcCtls = true;
10983 }
10984
10985 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
10986 {
10987 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
10988 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
10989 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
10990 pDbgState->fModifiedProcCtls2 = true;
10991 }
10992
10993 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
10994 {
10995 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
10996 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
10997 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
10998 pDbgState->fModifiedXcptBitmap = true;
10999 }
11000
11001 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
11002 {
11003 pVmcsInfo->u64Cr0Mask = 0;
11004 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
11005 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
11006 }
11007
11008 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
11009 {
11010 pVmcsInfo->u64Cr4Mask = 0;
11011 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
11012 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
11013 }
11014
11015 NOREF(pVCpu);
11016}
11017
11018
11019/**
11020 * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
11021 * re-entry next time around.
11022 *
11023 * @returns Strict VBox status code (i.e. informational status codes too).
11024 * @param pVCpu The cross context virtual CPU structure.
11025 * @param pVmxTransient The VMX-transient structure.
11026 * @param pDbgState The debug state.
11027 * @param rcStrict The return code from executing the guest using single
11028 * stepping.
11029 */
11030static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
11031 VBOXSTRICTRC rcStrict)
11032{
11033 /*
11034 * Restore VM-exit control settings as we may not reenter this function the
11035 * next time around.
11036 */
11037 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11038
11039 /* We reload the initial value and trigger what recalculations we can the
11040 next time around. From the looks of things, that's all that's required atm. */
11041 if (pDbgState->fModifiedProcCtls)
11042 {
11043 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
11044 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
11045 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
11046 AssertRC(rc2);
11047 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
11048 }
11049
11050 /* We're currently the only ones messing with this one, so just restore the
11051 cached value and reload the field. */
11052 if ( pDbgState->fModifiedProcCtls2
11053 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
11054 {
11055 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
11056 AssertRC(rc2);
11057 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
11058 }
11059
11060 /* If we've modified the exception bitmap, we restore it and trigger
11061 reloading and partial recalculation the next time around. */
11062 if (pDbgState->fModifiedXcptBitmap)
11063 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
11064
11065 return rcStrict;
11066}
11067
11068
11069/**
11070 * Configures VM-exit controls for current DBGF and DTrace settings.
11071 *
11072 * This updates @a pDbgState and the VMCS execution control fields to reflect
11073 * the necessary VM-exits demanded by DBGF and DTrace.
11074 *
11075 * @param pVCpu The cross context virtual CPU structure.
11076 * @param pVmxTransient The VMX-transient structure. May update
11077 * fUpdatedTscOffsettingAndPreemptTimer.
11078 * @param pDbgState The debug state.
11079 */
11080static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11081{
11082#ifndef IN_NEM_DARWIN
11083 /*
11084 * Take down the DTrace settings sequence number so we can spot changes.
11085 */
11086 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
11087 ASMCompilerBarrier();
11088#endif
11089
11090 /*
11091 * We'll rebuild most of the middle block of data members (holding the
11092 * current settings) as we go along here, so start by clearing it all.
11093 */
11094 pDbgState->bmXcptExtra = 0;
11095 pDbgState->fCpe1Extra = 0;
11096 pDbgState->fCpe1Unwanted = 0;
11097 pDbgState->fCpe2Extra = 0;
11098 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
11099 pDbgState->bmExitsToCheck[i] = 0;
11100
11101 /*
11102 * Software interrupts (INT XXh) - no idea how to trigger these...
11103 */
11104 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11105 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
11106 || VBOXVMM_INT_SOFTWARE_ENABLED())
11107 {
11108 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11109 }
11110
11111 /*
11112 * INT3 breakpoints - triggered by #BP exceptions.
11113 */
11114 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
11115 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11116
11117 /*
11118 * Exception bitmap and XCPT events+probes.
11119 */
11120 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
11121 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
11122 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
11123
11124 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
11125 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
11126 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11127 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
11128 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
11129 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
11130 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
11131 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
11132 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
11133 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
11134 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
11135 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
11136 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
11137 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
11138 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
11139 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
11140 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
11141 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
11142
11143 if (pDbgState->bmXcptExtra)
11144 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11145
11146 /*
11147 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
11148 *
11149 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
11150 * So, when adding/changing/removing please don't forget to update it.
11151 *
11152 * Some of the macros are picking up local variables to save horizontal space
11153 * (being able to see it in a table is the lesser evil here).
11154 */
11155#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
11156 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
11157 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
11158#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
11159 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11160 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11161 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11162 } else do { } while (0)
11163#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
11164 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11165 { \
11166 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
11167 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11168 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11169 } else do { } while (0)
11170#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
11171 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11172 { \
11173 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
11174 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11175 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11176 } else do { } while (0)
11177#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
11178 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11179 { \
11180 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
11181 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11182 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11183 } else do { } while (0)
11184
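    /* How to read the table below (worked example): SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT,
       VMX_PROC_CTLS_HLT_EXIT) means: if either the DBGF event DBGFEVENT_INSTR_HALT or the DTrace probe
       VBOXVMM_INSTR_HALT is enabled, OR VMX_PROC_CTLS_HLT_EXIT into pDbgState->fCpe1Extra and set the
       VMX_EXIT_HLT bit in pDbgState->bmExitsToCheck. */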
11185 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
11186 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
11187 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
11188 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
11189 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
11190
11191 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
11192 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
11193 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
11194 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
11195 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
11196 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
11197 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
11198 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
11199 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
11200 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
11201 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
11202 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
11203 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
11204 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
11205 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
11206 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
11207 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
11208 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
11209 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
11210 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
11211 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
11212 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
11213 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
11214 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
11215 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
11216 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
11217 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
11218 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
11219 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
11220 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
11221 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
11222 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
11223 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
11224 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
11225 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
11226 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
11227
11228 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
11229 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11230 {
11231 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo,
11232 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
11233 AssertRC(rc);
11234
11235#if 0 /** @todo fix me */
11236 pDbgState->fClearCr0Mask = true;
11237 pDbgState->fClearCr4Mask = true;
11238#endif
11239 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
11240 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
11241 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11242 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
11243 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
11244 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
11245 require clearing here and in the loop if we start using it. */
11246 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
11247 }
11248 else
11249 {
11250 if (pDbgState->fClearCr0Mask)
11251 {
11252 pDbgState->fClearCr0Mask = false;
11253 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
11254 }
11255 if (pDbgState->fClearCr4Mask)
11256 {
11257 pDbgState->fClearCr4Mask = false;
11258 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
11259 }
11260 }
11261 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
11262 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
11263
11264 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
11265 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
11266 {
11267 /** @todo later, need to fix handler as it assumes this won't usually happen. */
11268 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
11269 }
11270 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
11271 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
11272
11273 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
11274 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
11275 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
11276 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
11277 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
11278 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
11279 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
11280 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
11281#if 0 /** @todo too slow, fix handler. */
11282 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
11283#endif
11284 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
11285
11286 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
11287 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
11288 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
11289 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
11290 {
11291 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11292 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
11293 }
11294 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11295 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11296 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11297 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11298
11299 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
11300 || IS_EITHER_ENABLED(pVM, INSTR_STR)
11301 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
11302 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
11303 {
11304 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11305 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
11306 }
11307 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
11308 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
11309 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
11310 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
11311
11312 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
11313 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
11314 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
11315 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
11316 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
11317 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
11318 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
11319 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
11320 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
11321 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
11322 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
11323 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
11324 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
11325 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
11326 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
11327 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
11328 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
11329 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
11330 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
11331 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
11332 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
11333 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
11334
11335#undef IS_EITHER_ENABLED
11336#undef SET_ONLY_XBM_IF_EITHER_EN
11337#undef SET_CPE1_XBM_IF_EITHER_EN
11338#undef SET_CPEU_XBM_IF_EITHER_EN
11339#undef SET_CPE2_XBM_IF_EITHER_EN
11340
11341 /*
11342 * Sanitize the control stuff.
11343 */
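    /*
     * (Assuming the usual VMX capability MSR convention: 'allowed1' holds the
     *  controls the CPU permits to be set, while 'allowed0' holds the controls
     *  the CPU forces to 1.  Hence the extra controls are intersected with
     *  allowed1, and controls that cannot be cleared anyway are dropped from
     *  the unwanted set.)
     */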
11344 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
11345 if (pDbgState->fCpe2Extra)
11346 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
11347 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
11348 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
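    /* If the RDTSC-exiting preference changed, flip the cached flag and force the
       TSC-offsetting / preemption-timer setup to be re-evaluated before the next
       VM-entry. */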
11349#ifndef IN_NEM_DARWIN
11350 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11351 {
11352 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
11353 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11354 }
11355#else
11356 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11357 {
11358 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
11359 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11360 }
11361#endif
11362
11363 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
11364 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
11365 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
11366 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
11367}
11368
11369
11370/**
11371 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
11372 * appropriate.
11373 *
11374 * The caller has checked the VM-exit against the
11375 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs
11376 * already, so we don't have to do that either.
11377 *
11378 * @returns Strict VBox status code (i.e. informational status codes too).
11379 * @param pVCpu The cross context virtual CPU structure.
11380 * @param pVmxTransient The VMX-transient structure.
11381 * @param uExitReason The VM-exit reason.
11382 *
11383 * @remarks The name of this function is displayed by dtrace, so keep it short
11384 * and to the point. No longer than 33 chars, please.
11385 */
11386static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11387{
11388 /*
11389 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11390 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11391 *
11392 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
11393 * does. Additions, changes and removals must be made in both places. Same ordering, please.
11394 *
11395 * Added/removed events must also be reflected in the next section
11396 * where we dispatch dtrace events.
11397 */
11398 bool fDtrace1 = false;
11399 bool fDtrace2 = false;
11400 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11401 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11402 uint32_t uEventArg = 0;
11403#define SET_EXIT(a_EventSubName) \
11404 do { \
11405 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11406 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11407 } while (0)
11408#define SET_BOTH(a_EventSubName) \
11409 do { \
11410 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11411 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11412 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11413 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11414 } while (0)
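    /*
     * Expansion sketch: SET_BOTH(CPUID) sets
     *     enmEvent1 = DBGFEVENT_INSTR_CPUID;  fDtrace1 = VBOXVMM_INSTR_CPUID_ENABLED();
     *     enmEvent2 = DBGFEVENT_EXIT_CPUID;   fDtrace2 = VBOXVMM_EXIT_CPUID_ENABLED();
     * whereas SET_EXIT(TASK_SWITCH) only fills in the enmEvent2/fDtrace2 pair.
     */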
11415 switch (uExitReason)
11416 {
11417 case VMX_EXIT_MTF:
11418 return vmxHCExitMtf(pVCpu, pVmxTransient);
11419
11420 case VMX_EXIT_XCPT_OR_NMI:
11421 {
11422 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11423 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11424 {
11425 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11426 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11427 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11428 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11429 {
11430 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11431 {
11432 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE>(pVCpu, pVmxTransient);
11433 uEventArg = pVmxTransient->uExitIntErrorCode;
11434 }
11435 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11436 switch (enmEvent1)
11437 {
11438 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11439 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11440 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11441 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11442 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11443 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11444 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11445 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11446 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11447 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11448 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11449 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11450 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11451 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11452 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11453 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11454 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11455 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11456 default: break;
11457 }
11458 }
11459 else
11460 AssertFailed();
11461 break;
11462
11463 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11464 uEventArg = idxVector;
11465 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11466 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11467 break;
11468 }
11469 break;
11470 }
11471
11472 case VMX_EXIT_TRIPLE_FAULT:
11473 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11474 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11475 break;
11476 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11477 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11478 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11479 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11480 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11481
11482 /* Instruction specific VM-exits: */
11483 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11484 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11485 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11486 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11487 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11488 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11489 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11490 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11491 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11492 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11493 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11494 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11495 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11496 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11497 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11498 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11499 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11500 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11501 case VMX_EXIT_MOV_CRX:
11502 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11503 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11504 SET_BOTH(CRX_READ);
11505 else
11506 SET_BOTH(CRX_WRITE);
11507 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11508 break;
11509 case VMX_EXIT_MOV_DRX:
11510 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11511 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11512 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11513 SET_BOTH(DRX_READ);
11514 else
11515 SET_BOTH(DRX_WRITE);
11516 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11517 break;
11518 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11519 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11520 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11521 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11522 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11523 case VMX_EXIT_GDTR_IDTR_ACCESS:
11524 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11525 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11526 {
11527 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11528 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11529 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11530 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11531 }
11532 break;
11533
11534 case VMX_EXIT_LDTR_TR_ACCESS:
11535 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11536 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11537 {
11538 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11539 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11540 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11541 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11542 }
11543 break;
11544
11545 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11546 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11547 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11548 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11549 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11550 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11551 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11552 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11553 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11554 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11555 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11556
11557 /* Events that aren't relevant at this point. */
11558 case VMX_EXIT_EXT_INT:
11559 case VMX_EXIT_INT_WINDOW:
11560 case VMX_EXIT_NMI_WINDOW:
11561 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11562 case VMX_EXIT_PREEMPT_TIMER:
11563 case VMX_EXIT_IO_INSTR:
11564 break;
11565
11566 /* Errors and unexpected events. */
11567 case VMX_EXIT_INIT_SIGNAL:
11568 case VMX_EXIT_SIPI:
11569 case VMX_EXIT_IO_SMI:
11570 case VMX_EXIT_SMI:
11571 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11572 case VMX_EXIT_ERR_MSR_LOAD:
11573 case VMX_EXIT_ERR_MACHINE_CHECK:
11574 case VMX_EXIT_PML_FULL:
11575 case VMX_EXIT_VIRTUALIZED_EOI:
11576 break;
11577
11578 default:
11579 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11580 break;
11581 }
11582#undef SET_BOTH
11583#undef SET_EXIT
11584
11585 /*
11586 * Dtrace tracepoints go first. We do them all here at once so we don't
11587 * have to duplicate the guest-state import and related code a few dozen times.
11588 * The downside is that we've got to repeat the switch, though this time
11589 * we use enmEvent since the probes are a subset of what DBGF does.
11590 */
11591 if (fDtrace1 || fDtrace2)
11592 {
11593 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11594 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11595 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11596 switch (enmEvent1)
11597 {
11598 /** @todo consider which extra parameters would be helpful for each probe. */
11599 case DBGFEVENT_END: break;
11600 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11601 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11602 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11603 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11604 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11605 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11606 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11607 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11608 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11609 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11610 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11611 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11612 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11613 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11614 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11615 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11616 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11617 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11618 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11619 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11620 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11621 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11622 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11623 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11624 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11625 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11626 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11627 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11628 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11629 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11630 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11631 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11632 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11633 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11634 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11635 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11636 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11637 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11638 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11639 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11640 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11641 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11642 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11643 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11644 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11645 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11646 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11647 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11648 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11649 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11650 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11651 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11652 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11653 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11654 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11655 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11656 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11657 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11658 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11659 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11660 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11661 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11662 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11663 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11664 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11665 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11666 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11667 }
11668 switch (enmEvent2)
11669 {
11670 /** @todo consider which extra parameters would be helpful for each probe. */
11671 case DBGFEVENT_END: break;
11672 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11673 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11674 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11675 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11676 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11677 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11678 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11679 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11680 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11681 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11682 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11683 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11684 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11685 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11686 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11687 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11688 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11689 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11690 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11691 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11692 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11693 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11694 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11695 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11696 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11697 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11698 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11699 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11700 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11701 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11702 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11703 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11704 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11705 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11706 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11707 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11708 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11709 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11710 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11711 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11712 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11713 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11714 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11715 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11716 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11717 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11718 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11719 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11720 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11721 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11722 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11723 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11724 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11725 }
11726 }
11727
11728 /*
11729 * Fire off the DBGF event, if enabled (our check here is just a quick one,
11730 * the DBGF call will do a full check).
11731 *
11732 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11733 * Note! If we have two events, we prioritize the first, i.e. the instruction
11734 * one, in order to avoid event nesting.
11735 */
11736 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11737 if ( enmEvent1 != DBGFEVENT_END
11738 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11739 {
11740 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11741 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11742 if (rcStrict != VINF_SUCCESS)
11743 return rcStrict;
11744 }
11745 else if ( enmEvent2 != DBGFEVENT_END
11746 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11747 {
11748 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11749 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11750 if (rcStrict != VINF_SUCCESS)
11751 return rcStrict;
11752 }
11753
11754 return VINF_SUCCESS;
11755}
11756
11757
11758/**
11759 * Single-stepping VM-exit filtering.
11760 *
11761 * This is preprocessing the VM-exits and deciding whether we've gotten far
11762 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11763 * handling is performed.
11764 *
11765 * @returns Strict VBox status code (i.e. informational status codes too).
11766 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11767 * @param pVmxTransient The VMX-transient structure.
11768 * @param pDbgState The debug state.
11769 */
11770DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11771{
11772 /*
11773 * Expensive (saves context) generic dtrace VM-exit probe.
11774 */
11775 uint32_t const uExitReason = pVmxTransient->uExitReason;
11776 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11777 { /* more likely */ }
11778 else
11779 {
11780 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11781 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11782 AssertRC(rc);
11783 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11784 }
11785
11786#ifndef IN_NEM_DARWIN
11787 /*
11788 * Check for host NMI, just to get that out of the way.
11789 */
11790 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11791 { /* normally likely */ }
11792 else
11793 {
11794 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
11795 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11796 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11797 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11798 }
11799#endif
11800
11801 /*
11802 * Check for single stepping event if we're stepping.
11803 */
11804 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11805 {
11806 switch (uExitReason)
11807 {
11808 case VMX_EXIT_MTF:
11809 return vmxHCExitMtf(pVCpu, pVmxTransient);
11810
11811 /* Various events: */
11812 case VMX_EXIT_XCPT_OR_NMI:
11813 case VMX_EXIT_EXT_INT:
11814 case VMX_EXIT_TRIPLE_FAULT:
11815 case VMX_EXIT_INT_WINDOW:
11816 case VMX_EXIT_NMI_WINDOW:
11817 case VMX_EXIT_TASK_SWITCH:
11818 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11819 case VMX_EXIT_APIC_ACCESS:
11820 case VMX_EXIT_EPT_VIOLATION:
11821 case VMX_EXIT_EPT_MISCONFIG:
11822 case VMX_EXIT_PREEMPT_TIMER:
11823
11824 /* Instruction specific VM-exits: */
11825 case VMX_EXIT_CPUID:
11826 case VMX_EXIT_GETSEC:
11827 case VMX_EXIT_HLT:
11828 case VMX_EXIT_INVD:
11829 case VMX_EXIT_INVLPG:
11830 case VMX_EXIT_RDPMC:
11831 case VMX_EXIT_RDTSC:
11832 case VMX_EXIT_RSM:
11833 case VMX_EXIT_VMCALL:
11834 case VMX_EXIT_VMCLEAR:
11835 case VMX_EXIT_VMLAUNCH:
11836 case VMX_EXIT_VMPTRLD:
11837 case VMX_EXIT_VMPTRST:
11838 case VMX_EXIT_VMREAD:
11839 case VMX_EXIT_VMRESUME:
11840 case VMX_EXIT_VMWRITE:
11841 case VMX_EXIT_VMXOFF:
11842 case VMX_EXIT_VMXON:
11843 case VMX_EXIT_MOV_CRX:
11844 case VMX_EXIT_MOV_DRX:
11845 case VMX_EXIT_IO_INSTR:
11846 case VMX_EXIT_RDMSR:
11847 case VMX_EXIT_WRMSR:
11848 case VMX_EXIT_MWAIT:
11849 case VMX_EXIT_MONITOR:
11850 case VMX_EXIT_PAUSE:
11851 case VMX_EXIT_GDTR_IDTR_ACCESS:
11852 case VMX_EXIT_LDTR_TR_ACCESS:
11853 case VMX_EXIT_INVEPT:
11854 case VMX_EXIT_RDTSCP:
11855 case VMX_EXIT_INVVPID:
11856 case VMX_EXIT_WBINVD:
11857 case VMX_EXIT_XSETBV:
11858 case VMX_EXIT_RDRAND:
11859 case VMX_EXIT_INVPCID:
11860 case VMX_EXIT_VMFUNC:
11861 case VMX_EXIT_RDSEED:
11862 case VMX_EXIT_XSAVES:
11863 case VMX_EXIT_XRSTORS:
11864 {
11865 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11866 AssertRCReturn(rc, rc);
11867 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11868 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11869 return VINF_EM_DBG_STEPPED;
11870 break;
11871 }
11872
11873 /* Errors and unexpected events: */
11874 case VMX_EXIT_INIT_SIGNAL:
11875 case VMX_EXIT_SIPI:
11876 case VMX_EXIT_IO_SMI:
11877 case VMX_EXIT_SMI:
11878 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11879 case VMX_EXIT_ERR_MSR_LOAD:
11880 case VMX_EXIT_ERR_MACHINE_CHECK:
11881 case VMX_EXIT_PML_FULL:
11882 case VMX_EXIT_VIRTUALIZED_EOI:
11883 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault-like, so I guess we must process it? */
11884 break;
11885
11886 default:
11887 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11888 break;
11889 }
11890 }
11891
11892 /*
11893 * Check for debugger event breakpoints and dtrace probes.
11894 */
11895 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11896 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11897 {
11898 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11899 if (rcStrict != VINF_SUCCESS)
11900 return rcStrict;
11901 }
11902
11903 /*
11904 * Normal processing.
11905 */
11906#ifdef HMVMX_USE_FUNCTION_TABLE
11907 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11908#else
11909 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11910#endif
11911}
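/*
 * Usage sketch (hypothetical caller, names are assumptions): the debug run-loop
 * that owns the VMXRUNDBGSTATE is expected to route every VM-exit through this
 * filter, roughly along the lines of
 *     VBOXSTRICTRC rcStrict = vmxHCRunDebugHandleExit(pVCpu, &VmxTransient, &DbgState);
 *     if (rcStrict != VINF_SUCCESS)
 *         break; // e.g. VINF_EM_DBG_STEPPED, DBGF/dtrace events, errors
 * Only the function signature is taken from the code above; the local variable
 * names and surrounding loop are illustrative.
 */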
11912
11913/** @} */