VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@ 98506

Last change on this file since 98506 was 98506, checked in by vboxsync, 2 years ago

VMM: Nested VMX: bugref:10318 More Darwin build fixes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 519.8 KB
 
1/* $Id: VMXAllTemplate.cpp.h 98506 2023-02-08 14:56:04Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
39#endif
40
41/** Enables condensing of VMREAD instructions, see vmxHCReadToTransient(). */
42#define HMVMX_WITH_CONDENSED_VMREADS
43
44/** Use the function table. */
45#define HMVMX_USE_FUNCTION_TABLE
46
47/** Determine which tagged-TLB flush handler to use. */
48#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
49#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
50#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
51#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
52
53/** Assert that all the given fields have been read from the VMCS. */
54#ifdef VBOX_STRICT
55# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
56 do { \
57 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead); \
58 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
59 } while (0)
60#else
61# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
62#endif
63
64/**
65 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
66 * guest using hardware-assisted VMX.
67 *
68 * This excludes state like GPRs (other than RSP) which are always swapped
69 * and restored across the world-switch, and also registers like the EFER
70 * MSR which cannot be modified by the guest without causing a VM-exit.
71 */
72#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
73 | CPUMCTX_EXTRN_RFLAGS \
74 | CPUMCTX_EXTRN_RSP \
75 | CPUMCTX_EXTRN_SREG_MASK \
76 | CPUMCTX_EXTRN_TABLE_MASK \
77 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
78 | CPUMCTX_EXTRN_SYSCALL_MSRS \
79 | CPUMCTX_EXTRN_SYSENTER_MSRS \
80 | CPUMCTX_EXTRN_TSC_AUX \
81 | CPUMCTX_EXTRN_OTHER_MSRS \
82 | CPUMCTX_EXTRN_CR0 \
83 | CPUMCTX_EXTRN_CR3 \
84 | CPUMCTX_EXTRN_CR4 \
85 | CPUMCTX_EXTRN_DR7 \
86 | CPUMCTX_EXTRN_HWVIRT \
87 | CPUMCTX_EXTRN_INHIBIT_INT \
88 | CPUMCTX_EXTRN_INHIBIT_NMI)
89
90/**
91 * Exception bitmap mask for real-mode guests (real-on-v86).
92 *
93 * We need to intercept all exceptions manually except:
94 * - \#AC and \#DB, which are always intercepted anyway to prevent the CPU
95 * from deadlocking due to bugs in Intel CPUs.
96 * - \#PF, which need not be intercepted even in real-mode if we have nested
97 * paging support.
98 */
99#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
100 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
101 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
102 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
103 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
104 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
105 | RT_BIT(X86_XCPT_XF))
106
107/** Maximum VM-instruction error number. */
108#define HMVMX_INSTR_ERROR_MAX 28
109
110/** Profiling macro. */
111#ifdef HM_PROFILE_EXIT_DISPATCH
112# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
113# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
114#else
115# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
116# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
117#endif
118
119#ifndef IN_NEM_DARWIN
120/** Assert that preemption is disabled or covered by thread-context hooks. */
121# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
122 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
123
124/** Assert that we haven't migrated CPUs when thread-context hooks are not
125 * used. */
126# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
127 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
128 ("Illegal migration! Entered on CPU %u Current %u\n", \
129 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
130#else
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
132# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
133#endif
134
135/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
136 * context. */
137#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
138 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
139 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
140
141/** Log the VM-exit reason with an easily visible marker to identify it in a
142 * potential sea of logging data. */
143#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
144 do { \
145 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
146 HMGetVmxExitName(a_uExitReason))); \
147 } while (0) \
148
149
150/*********************************************************************************************************************************
151* Structures and Typedefs *
152*********************************************************************************************************************************/
153/**
154 * Memory operand read or write access.
155 */
156typedef enum VMXMEMACCESS
157{
158 VMXMEMACCESS_READ = 0,
159 VMXMEMACCESS_WRITE = 1
160} VMXMEMACCESS;
161
162
163/**
164 * VMX VM-exit handler.
165 *
166 * @returns Strict VBox status code (i.e. informational status codes too).
167 * @param pVCpu The cross context virtual CPU structure.
168 * @param pVmxTransient The VMX-transient structure.
169 */
170#ifndef HMVMX_USE_FUNCTION_TABLE
171typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
172#else
173typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
174/** Pointer to VM-exit handler. */
175typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
176#endif
177
178/**
179 * VMX VM-exit handler, non-strict status code.
180 *
181 * This is generally the same as FNVMXEXITHANDLER; the NSRC bit is just FYI.
182 *
183 * @returns VBox status code, no informational status code returned.
184 * @param pVCpu The cross context virtual CPU structure.
185 * @param pVmxTransient The VMX-transient structure.
186 *
187 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
188 * use of that status code will be replaced with VINF_EM_SOMETHING
189 * later when switching over to IEM.
190 */
191#ifndef HMVMX_USE_FUNCTION_TABLE
192typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
193#else
194typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
195#endif
196
197
198/*********************************************************************************************************************************
199* Internal Functions *
200*********************************************************************************************************************************/
201#ifndef HMVMX_USE_FUNCTION_TABLE
202DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
204# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
205#else
206# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
207# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
208#endif
209#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
210DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
211#endif
212
213static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
214
215/** @name VM-exit handler prototypes.
216 * @{
217 */
218static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
219static FNVMXEXITHANDLER vmxHCExitExtInt;
220static FNVMXEXITHANDLER vmxHCExitTripleFault;
221static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
222static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
223static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
224static FNVMXEXITHANDLER vmxHCExitCpuid;
225static FNVMXEXITHANDLER vmxHCExitGetsec;
226static FNVMXEXITHANDLER vmxHCExitHlt;
227static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
228static FNVMXEXITHANDLER vmxHCExitInvlpg;
229static FNVMXEXITHANDLER vmxHCExitRdpmc;
230static FNVMXEXITHANDLER vmxHCExitVmcall;
231#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
232static FNVMXEXITHANDLER vmxHCExitVmclear;
233static FNVMXEXITHANDLER vmxHCExitVmlaunch;
234static FNVMXEXITHANDLER vmxHCExitVmptrld;
235static FNVMXEXITHANDLER vmxHCExitVmptrst;
236static FNVMXEXITHANDLER vmxHCExitVmread;
237static FNVMXEXITHANDLER vmxHCExitVmresume;
238static FNVMXEXITHANDLER vmxHCExitVmwrite;
239static FNVMXEXITHANDLER vmxHCExitVmxoff;
240static FNVMXEXITHANDLER vmxHCExitVmxon;
241static FNVMXEXITHANDLER vmxHCExitInvvpid;
242# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
243static FNVMXEXITHANDLER vmxHCExitInvept;
244# endif
245#endif
246static FNVMXEXITHANDLER vmxHCExitRdtsc;
247static FNVMXEXITHANDLER vmxHCExitMovCRx;
248static FNVMXEXITHANDLER vmxHCExitMovDRx;
249static FNVMXEXITHANDLER vmxHCExitIoInstr;
250static FNVMXEXITHANDLER vmxHCExitRdmsr;
251static FNVMXEXITHANDLER vmxHCExitWrmsr;
252static FNVMXEXITHANDLER vmxHCExitMwait;
253static FNVMXEXITHANDLER vmxHCExitMtf;
254static FNVMXEXITHANDLER vmxHCExitMonitor;
255static FNVMXEXITHANDLER vmxHCExitPause;
256static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
257static FNVMXEXITHANDLER vmxHCExitApicAccess;
258static FNVMXEXITHANDLER vmxHCExitEptViolation;
259static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
260static FNVMXEXITHANDLER vmxHCExitRdtscp;
261static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
262static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
263static FNVMXEXITHANDLER vmxHCExitXsetbv;
264static FNVMXEXITHANDLER vmxHCExitInvpcid;
265#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
266static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
267#endif
268static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
269static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
270/** @} */
271
272#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
273/** @name Nested-guest VM-exit handler prototypes.
274 * @{
275 */
276static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
277static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
278static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
279static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
280static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
281static FNVMXEXITHANDLER vmxHCExitHltNested;
282static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
283static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
284static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
285static FNVMXEXITHANDLER vmxHCExitRdtscNested;
286static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
287static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
288static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
289static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
290static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
291static FNVMXEXITHANDLER vmxHCExitMwaitNested;
292static FNVMXEXITHANDLER vmxHCExitMtfNested;
293static FNVMXEXITHANDLER vmxHCExitMonitorNested;
294static FNVMXEXITHANDLER vmxHCExitPauseNested;
295static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
296static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
297static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
298static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
299static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
301static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
302static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
303static FNVMXEXITHANDLER vmxHCExitInstrNested;
304static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
305# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
306static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
307static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
308# endif
309/** @} */
310#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
311
312
313/*********************************************************************************************************************************
314* Global Variables *
315*********************************************************************************************************************************/
316#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
317/**
318 * Array of all VMCS fields.
319 * Any fields added to the VT-x spec. should be added here.
320 *
321 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
322 * of nested-guests.
323 */
324static const uint32_t g_aVmcsFields[] =
325{
326 /* 16-bit control fields. */
327 VMX_VMCS16_VPID,
328 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
329 VMX_VMCS16_EPTP_INDEX,
330 VMX_VMCS16_HLAT_PREFIX_SIZE,
331
332 /* 16-bit guest-state fields. */
333 VMX_VMCS16_GUEST_ES_SEL,
334 VMX_VMCS16_GUEST_CS_SEL,
335 VMX_VMCS16_GUEST_SS_SEL,
336 VMX_VMCS16_GUEST_DS_SEL,
337 VMX_VMCS16_GUEST_FS_SEL,
338 VMX_VMCS16_GUEST_GS_SEL,
339 VMX_VMCS16_GUEST_LDTR_SEL,
340 VMX_VMCS16_GUEST_TR_SEL,
341 VMX_VMCS16_GUEST_INTR_STATUS,
342 VMX_VMCS16_GUEST_PML_INDEX,
343
344 /* 16-bit host-state fields. */
345 VMX_VMCS16_HOST_ES_SEL,
346 VMX_VMCS16_HOST_CS_SEL,
347 VMX_VMCS16_HOST_SS_SEL,
348 VMX_VMCS16_HOST_DS_SEL,
349 VMX_VMCS16_HOST_FS_SEL,
350 VMX_VMCS16_HOST_GS_SEL,
351 VMX_VMCS16_HOST_TR_SEL,
352
353 /* 64-bit control fields. */
354 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
355 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
356 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
357 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
358 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
359 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
360 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
361 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
362 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
363 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
364 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
365 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
366 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
367 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
368 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
369 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
370 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
371 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
372 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
373 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
374 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
375 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
376 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
377 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
378 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
379 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
380 VMX_VMCS64_CTRL_EPTP_FULL,
381 VMX_VMCS64_CTRL_EPTP_HIGH,
382 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
383 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
384 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
385 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
386 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
387 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
388 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
389 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
390 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
391 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
392 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
393 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
394 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
395 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
396 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
397 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
398 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
399 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
400 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
401 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
402 VMX_VMCS64_CTRL_SPPTP_FULL,
403 VMX_VMCS64_CTRL_SPPTP_HIGH,
404 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
405 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
406 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
407 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
408 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
409 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
410 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_FULL,
411 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_HIGH,
412 VMX_VMCS64_CTRL_HLAT_PTR_FULL,
413 VMX_VMCS64_CTRL_HLAT_PTR_HIGH,
414 VMX_VMCS64_CTRL_EXIT2_FULL,
415 VMX_VMCS64_CTRL_EXIT2_HIGH,
416
417 /* 64-bit read-only data fields. */
418 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
419 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
420
421 /* 64-bit guest-state fields. */
422 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
423 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
424 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
425 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
426 VMX_VMCS64_GUEST_PAT_FULL,
427 VMX_VMCS64_GUEST_PAT_HIGH,
428 VMX_VMCS64_GUEST_EFER_FULL,
429 VMX_VMCS64_GUEST_EFER_HIGH,
430 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
431 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
432 VMX_VMCS64_GUEST_PDPTE0_FULL,
433 VMX_VMCS64_GUEST_PDPTE0_HIGH,
434 VMX_VMCS64_GUEST_PDPTE1_FULL,
435 VMX_VMCS64_GUEST_PDPTE1_HIGH,
436 VMX_VMCS64_GUEST_PDPTE2_FULL,
437 VMX_VMCS64_GUEST_PDPTE2_HIGH,
438 VMX_VMCS64_GUEST_PDPTE3_FULL,
439 VMX_VMCS64_GUEST_PDPTE3_HIGH,
440 VMX_VMCS64_GUEST_BNDCFGS_FULL,
441 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
442 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
443 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
444 VMX_VMCS64_GUEST_PKRS_FULL,
445 VMX_VMCS64_GUEST_PKRS_HIGH,
446
447 /* 64-bit host-state fields. */
448 VMX_VMCS64_HOST_PAT_FULL,
449 VMX_VMCS64_HOST_PAT_HIGH,
450 VMX_VMCS64_HOST_EFER_FULL,
451 VMX_VMCS64_HOST_EFER_HIGH,
452 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
453 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
454 VMX_VMCS64_HOST_PKRS_FULL,
455 VMX_VMCS64_HOST_PKRS_HIGH,
456
457 /* 32-bit control fields. */
458 VMX_VMCS32_CTRL_PIN_EXEC,
459 VMX_VMCS32_CTRL_PROC_EXEC,
460 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
461 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
462 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
463 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
464 VMX_VMCS32_CTRL_EXIT,
465 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
466 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
467 VMX_VMCS32_CTRL_ENTRY,
468 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
469 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
470 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
471 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
472 VMX_VMCS32_CTRL_TPR_THRESHOLD,
473 VMX_VMCS32_CTRL_PROC_EXEC2,
474 VMX_VMCS32_CTRL_PLE_GAP,
475 VMX_VMCS32_CTRL_PLE_WINDOW,
476
477 /* 32-bit read-only fields. */
478 VMX_VMCS32_RO_VM_INSTR_ERROR,
479 VMX_VMCS32_RO_EXIT_REASON,
480 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
481 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
482 VMX_VMCS32_RO_IDT_VECTORING_INFO,
483 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
484 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
485 VMX_VMCS32_RO_EXIT_INSTR_INFO,
486
487 /* 32-bit guest-state fields. */
488 VMX_VMCS32_GUEST_ES_LIMIT,
489 VMX_VMCS32_GUEST_CS_LIMIT,
490 VMX_VMCS32_GUEST_SS_LIMIT,
491 VMX_VMCS32_GUEST_DS_LIMIT,
492 VMX_VMCS32_GUEST_FS_LIMIT,
493 VMX_VMCS32_GUEST_GS_LIMIT,
494 VMX_VMCS32_GUEST_LDTR_LIMIT,
495 VMX_VMCS32_GUEST_TR_LIMIT,
496 VMX_VMCS32_GUEST_GDTR_LIMIT,
497 VMX_VMCS32_GUEST_IDTR_LIMIT,
498 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
499 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
500 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
501 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
502 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
503 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
504 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
505 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
506 VMX_VMCS32_GUEST_INT_STATE,
507 VMX_VMCS32_GUEST_ACTIVITY_STATE,
508 VMX_VMCS32_GUEST_SMBASE,
509 VMX_VMCS32_GUEST_SYSENTER_CS,
510 VMX_VMCS32_PREEMPT_TIMER_VALUE,
511
512 /* 32-bit host-state fields. */
513 VMX_VMCS32_HOST_SYSENTER_CS,
514
515 /* Natural-width control fields. */
516 VMX_VMCS_CTRL_CR0_MASK,
517 VMX_VMCS_CTRL_CR4_MASK,
518 VMX_VMCS_CTRL_CR0_READ_SHADOW,
519 VMX_VMCS_CTRL_CR4_READ_SHADOW,
520 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
521 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
522 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
523 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
524
525 /* Natural-width read-only data fields. */
526 VMX_VMCS_RO_EXIT_QUALIFICATION,
527 VMX_VMCS_RO_IO_RCX,
528 VMX_VMCS_RO_IO_RSI,
529 VMX_VMCS_RO_IO_RDI,
530 VMX_VMCS_RO_IO_RIP,
531 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
532
533 /* Natural-width guest-state fields */
534 VMX_VMCS_GUEST_CR0,
535 VMX_VMCS_GUEST_CR3,
536 VMX_VMCS_GUEST_CR4,
537 VMX_VMCS_GUEST_ES_BASE,
538 VMX_VMCS_GUEST_CS_BASE,
539 VMX_VMCS_GUEST_SS_BASE,
540 VMX_VMCS_GUEST_DS_BASE,
541 VMX_VMCS_GUEST_FS_BASE,
542 VMX_VMCS_GUEST_GS_BASE,
543 VMX_VMCS_GUEST_LDTR_BASE,
544 VMX_VMCS_GUEST_TR_BASE,
545 VMX_VMCS_GUEST_GDTR_BASE,
546 VMX_VMCS_GUEST_IDTR_BASE,
547 VMX_VMCS_GUEST_DR7,
548 VMX_VMCS_GUEST_RSP,
549 VMX_VMCS_GUEST_RIP,
550 VMX_VMCS_GUEST_RFLAGS,
551 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
552 VMX_VMCS_GUEST_SYSENTER_ESP,
553 VMX_VMCS_GUEST_SYSENTER_EIP,
554 VMX_VMCS_GUEST_S_CET,
555 VMX_VMCS_GUEST_SSP,
556 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
557
558 /* Natural-width host-state fields */
559 VMX_VMCS_HOST_CR0,
560 VMX_VMCS_HOST_CR3,
561 VMX_VMCS_HOST_CR4,
562 VMX_VMCS_HOST_FS_BASE,
563 VMX_VMCS_HOST_GS_BASE,
564 VMX_VMCS_HOST_TR_BASE,
565 VMX_VMCS_HOST_GDTR_BASE,
566 VMX_VMCS_HOST_IDTR_BASE,
567 VMX_VMCS_HOST_SYSENTER_ESP,
568 VMX_VMCS_HOST_SYSENTER_EIP,
569 VMX_VMCS_HOST_RSP,
570 VMX_VMCS_HOST_RIP,
571 VMX_VMCS_HOST_S_CET,
572 VMX_VMCS_HOST_SSP,
573 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
574};
575#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
576
577#ifdef HMVMX_USE_FUNCTION_TABLE
578/**
579 * VMX_EXIT dispatch table.
580 */
581static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
582{
583 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
584 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
585 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
586 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
587 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
588 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
589 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
590 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
591 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
592 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
593 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
594 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
595 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
596 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
597 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
598 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
599 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
600 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
601 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
602#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
603 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
604 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
605 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
606 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
607 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
608 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
609 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
610 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
611 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
612#else
613 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
614 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
615 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
616 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
617 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
618 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
619 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
620 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
621 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
622#endif
623 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
624 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
625 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
626 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
627 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
628 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
629 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
630 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
631 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
632 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
633 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
634 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
635 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
636 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
637 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
638 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
639 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
640 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
641 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
642 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
643 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
644 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
645#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
646 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
647#else
648 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
649#endif
650 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
651 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
652#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
653 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
654#else
655 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
656#endif
657 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
658 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
659 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
660 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
661 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
662 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
663 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
664 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
665 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
666 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
667 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
668 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
669 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
670 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
671 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
672 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
673};
674#endif /* HMVMX_USE_FUNCTION_TABLE */
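/*
 * Illustrative sketch (not part of the original file): how a dispatcher such as
 * vmxHCHandleExit() would typically consume the table above. The function name
 * vmxHCDispatchExitExample and the uExitReason parameter are hypothetical;
 * g_aVMExitHandlers, VMX_EXIT_MAX and vmxHCExitErrUnexpected are from this file.
 * Kept under "#if 0" so it cannot affect the build.
 */
#if 0
DECLINLINE(VBOXSTRICTRC) vmxHCDispatchExitExample(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
{
    /* Bounds-check the basic exit reason before indexing the handler table. */
    if (RT_LIKELY(uExitReason <= VMX_EXIT_MAX))
        return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
    return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
}
#endif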
675
676#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
677static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
678{
679 /* 0 */ "(Not Used)",
680 /* 1 */ "VMCALL executed in VMX root operation.",
681 /* 2 */ "VMCLEAR with invalid physical address.",
682 /* 3 */ "VMCLEAR with VMXON pointer.",
683 /* 4 */ "VMLAUNCH with non-clear VMCS.",
684 /* 5 */ "VMRESUME with non-launched VMCS.",
685 /* 6 */ "VMRESUME after VMXOFF",
686 /* 7 */ "VM-entry with invalid control fields.",
687 /* 8 */ "VM-entry with invalid host state fields.",
688 /* 9 */ "VMPTRLD with invalid physical address.",
689 /* 10 */ "VMPTRLD with VMXON pointer.",
690 /* 11 */ "VMPTRLD with incorrect revision identifier.",
691 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
692 /* 13 */ "VMWRITE to read-only VMCS component.",
693 /* 14 */ "(Not Used)",
694 /* 15 */ "VMXON executed in VMX root operation.",
695 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
696 /* 17 */ "VM-entry with non-launched executing VMCS.",
697 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
698 /* 19 */ "VMCALL with non-clear VMCS.",
699 /* 20 */ "VMCALL with invalid VM-exit control fields.",
700 /* 21 */ "(Not Used)",
701 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
702 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
703 /* 24 */ "VMCALL with invalid SMM-monitor features.",
704 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
705 /* 26 */ "VM-entry with events blocked by MOV SS.",
706 /* 27 */ "(Not Used)",
707 /* 28 */ "Invalid operand to INVEPT/INVVPID."
708};
709#endif /* VBOX_STRICT && LOG_ENABLED */
710
711
712/**
713 * Gets the CR0 guest/host mask.
714 *
715 * These bits typically do not change through the lifetime of a VM. Any bit set in
716 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
717 * by the guest.
718 *
719 * @returns The CR0 guest/host mask.
720 * @param pVCpu The cross context virtual CPU structure.
721 */
722static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
723{
724 /*
725 * Modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW) and
726 * to CR0 bits that we require for shadow paging (PG) by the guest must cause VM-exits.
727 *
728 * Furthermore, modifications to any bits that are reserved/unspecified currently
729 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
730 * when future CPUs specify and use currently reserved/unspecified bits.
731 */
732 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
733 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
734 * and @bugref{6944}. */
735 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
736 AssertCompile(RT_HI_U32(VMX_EXIT_HOST_CR0_IGNORE_MASK) == UINT32_C(0xffffffff)); /* Paranoia. */
737 return ( X86_CR0_PE
738 | X86_CR0_NE
739 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
740 | X86_CR0_PG
741 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
742}
743
744
745/**
746 * Gets the CR4 guest/host mask.
747 *
748 * These bits typically do not change through the lifetime of a VM. Any bit set in
749 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
750 * by the guest.
751 *
752 * @returns The CR4 guest/host mask.
753 * @param pVCpu The cross context virtual CPU structure.
754 */
755static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
756{
757 /*
758 * We construct a mask of all CR4 bits that the guest can modify without causing
759 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
760 * a VM-exit when the guest attempts to modify them when executing using
761 * hardware-assisted VMX.
762 *
763 * When a feature is not exposed to the guest (and may be present on the host),
764 * we want to intercept guest modifications to the bit so we can emulate proper
765 * behavior (e.g., #GP).
766 *
767 * Furthermore, only modifications to those bits that don't require immediate
768 * emulation are allowed. For example, PCIDE is excluded because its behavior
769 * depends on CR3, which might not always be the guest value while executing
770 * using hardware-assisted VMX.
771 */
772 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
773 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
774#ifdef IN_NEM_DARWIN
775 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
776#endif
777 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
778
779 /*
780 * Paranoia.
781 * Ensure features exposed to the guest are present on the host.
782 */
783 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
784#ifdef IN_NEM_DARWIN
785 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
786#endif
787 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
788
789 uint64_t const fGstMask = X86_CR4_PVI
790 | X86_CR4_TSD
791 | X86_CR4_DE
792 | X86_CR4_MCE
793 | X86_CR4_PCE
794 | X86_CR4_OSXMMEEXCPT
795 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
796#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
797 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
798 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
799#endif
800 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
801 return ~fGstMask;
802}
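/*
 * Illustrative sketch (assumed usage, not taken from this file): the masks
 * returned by vmxHCGetFixedCr0Mask/vmxHCGetFixedCr4Mask would typically be
 * committed to the natural-width CR0/CR4 guest/host mask fields listed in
 * g_aVmcsFields above. A VMX_VMCS_WRITE_NW wrapper, analogous to the
 * VMX_VMCS_READ_NW macro used later in this template, is assumed here.
 */
#if 0
    uint64_t const fCr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
    uint64_t const fCr4Mask = vmxHCGetFixedCr4Mask(pVCpu);
    int rc  = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);
    rc     |= VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, fCr4Mask);
    AssertRC(rc);
#endif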
803
804
805/**
806 * Adds one or more exceptions to the exception bitmap and commits it to the current
807 * VMCS.
808 *
809 * @param pVCpu The cross context virtual CPU structure.
810 * @param pVmxTransient The VMX-transient structure.
811 * @param uXcptMask The exception(s) to add.
812 */
813static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
814{
815 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
816 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
817 if ((uXcptBitmap & uXcptMask) != uXcptMask)
818 {
819 uXcptBitmap |= uXcptMask;
820 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
821 AssertRC(rc);
822 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
823 }
824}
825
826
827/**
828 * Adds an exception to the exception bitmap and commits it to the current VMCS.
829 *
830 * @param pVCpu The cross context virtual CPU structure.
831 * @param pVmxTransient The VMX-transient structure.
832 * @param uXcpt The exception to add.
833 */
834static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
835{
836 Assert(uXcpt <= X86_XCPT_LAST);
837 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
838}
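/*
 * Illustrative usage (assumed, not taken from this file): intercepting a single
 * exception versus several at once. The exception choices are arbitrary examples;
 * the helpers and the RT_BIT_32/X86_XCPT_XXX identifiers are from this file and IPRT.
 */
#if 0
    vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
    vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(X86_XCPT_DE) | RT_BIT_32(X86_XCPT_UD));
#endif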
839
840
841/**
842 * Removes one or more exceptions from the exception bitmap and commits it to the
843 * current VMCS.
844 *
845 * This takes care of not removing the exception intercept if a nested-guest
846 * requires the exception to be intercepted.
847 *
848 * @returns VBox status code.
849 * @param pVCpu The cross context virtual CPU structure.
850 * @param pVmxTransient The VMX-transient structure.
851 * @param uXcptMask The exception(s) to remove.
852 */
853static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
854{
855 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
856 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
857 if (uXcptBitmap & uXcptMask)
858 {
859#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
860 if (!pVmxTransient->fIsNestedGuest)
861 { /* likely */ }
862 else
863 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
864#endif
865#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
866 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
867 | RT_BIT(X86_XCPT_DE)
868 | RT_BIT(X86_XCPT_NM)
869 | RT_BIT(X86_XCPT_TS)
870 | RT_BIT(X86_XCPT_UD)
871 | RT_BIT(X86_XCPT_NP)
872 | RT_BIT(X86_XCPT_SS)
873 | RT_BIT(X86_XCPT_GP)
874 | RT_BIT(X86_XCPT_PF)
875 | RT_BIT(X86_XCPT_MF));
876#elif defined(HMVMX_ALWAYS_TRAP_PF)
877 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
878#endif
879 if (uXcptMask)
880 {
881 /* Validate we are not removing any essential exception intercepts. */
882#ifndef IN_NEM_DARWIN
883 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
884#else
885 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
886#endif
887 NOREF(pVCpu);
888 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
889 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
890
891 /* Remove it from the exception bitmap. */
892 uXcptBitmap &= ~uXcptMask;
893
894 /* Commit and update the cache if necessary. */
895 if (pVmcsInfo->u32XcptBitmap != uXcptBitmap)
896 {
897 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
898 AssertRC(rc);
899 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
900 }
901 }
902 }
903 return VINF_SUCCESS;
904}
905
906
907/**
908 * Removes an exception from the exception bitmap and commits it to the current
909 * VMCS.
910 *
911 * @returns VBox status code.
912 * @param pVCpu The cross context virtual CPU structure.
913 * @param pVmxTransient The VMX-transient structure.
914 * @param uXcpt The exception to remove.
915 */
916static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
917{
918 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
919}
920
921#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
922
923/**
924 * Loads the shadow VMCS specified by the VMCS info. object.
925 *
926 * @returns VBox status code.
927 * @param pVmcsInfo The VMCS info. object.
928 *
929 * @remarks Can be called with interrupts disabled.
930 */
931static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
932{
933 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
934 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
935
936 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
937 if (RT_SUCCESS(rc))
938 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
939 return rc;
940}
941
942
943/**
944 * Clears the shadow VMCS specified by the VMCS info. object.
945 *
946 * @returns VBox status code.
947 * @param pVmcsInfo The VMCS info. object.
948 *
949 * @remarks Can be called with interrupts disabled.
950 */
951static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
952{
953 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
954 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
955
956 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
957 if (RT_SUCCESS(rc))
958 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
959 return rc;
960}
961
962
963/**
964 * Switches from and to the specified VMCSes.
965 *
966 * @returns VBox status code.
967 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
968 * @param pVmcsInfoTo The VMCS info. object we are switching to.
969 *
970 * @remarks Called with interrupts disabled.
971 */
972static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
973{
974 /*
975 * Clear the VMCS we are switching out if it has not already been cleared.
976 * This will sync any CPU internal data back to the VMCS.
977 */
978 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
979 {
980 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
981 if (RT_SUCCESS(rc))
982 {
983 /*
984 * The shadow VMCS, if any, would not be active at this point since we
985 * would have cleared it while importing the virtual hardware-virtualization
986 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
987 * clear the shadow VMCS here, just assert for safety.
988 */
989 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
990 }
991 else
992 return rc;
993 }
994
995 /*
996 * Clear the VMCS we are switching to if it has not already been cleared.
997 * This will initialize the VMCS launch state to "clear" required for loading it.
998 *
999 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1000 */
1001 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1002 {
1003 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1004 if (RT_SUCCESS(rc))
1005 { /* likely */ }
1006 else
1007 return rc;
1008 }
1009
1010 /*
1011 * Finally, load the VMCS we are switching to.
1012 */
1013 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1014}
1015
1016
1017/**
1018 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1019 * caller.
1020 *
1021 * @returns VBox status code.
1022 * @param pVCpu The cross context virtual CPU structure.
1023 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1024 * true) or guest VMCS (pass false).
1025 */
1026static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1027{
1028 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1029 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1030
1031 PVMXVMCSINFO pVmcsInfoFrom;
1032 PVMXVMCSINFO pVmcsInfoTo;
1033 if (fSwitchToNstGstVmcs)
1034 {
1035 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1036 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1037 }
1038 else
1039 {
1040 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1041 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1042 }
1043
1044 /*
1045 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1046 * preemption hook code path acquires the current VMCS.
1047 */
1048 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1049
1050 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1051 if (RT_SUCCESS(rc))
1052 {
1053 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1054 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1055
1056 /*
1057 * If we are switching to a VMCS that was executed on a different host CPU or was
1058 * never executed before, flag that we need to export the host state before executing
1059 * guest/nested-guest code using hardware-assisted VMX.
1060 *
1061 * This could probably be done in a preemptible context since the preemption hook
1062 * will flag the necessary change in host context. However, since preemption is
1063 * already disabled and to avoid making assumptions about host specific code in
1064 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1065 * disabled.
1066 */
1067 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1068 { /* likely */ }
1069 else
1070 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1071
1072 ASMSetFlags(fEFlags);
1073
1074 /*
1075 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1076 * flag that we need to update the host MSR values there. Even if we decide in the
1077 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1078 * if its content differs, we would have to update the host MSRs anyway.
1079 */
1080 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1081 }
1082 else
1083 ASMSetFlags(fEFlags);
1084 return rc;
1085}
1086
1087#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1088#ifdef VBOX_STRICT
1089
1090/**
1091 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1092 * transient structure.
1093 *
1094 * @param pVCpu The cross context virtual CPU structure.
1095 * @param pVmxTransient The VMX-transient structure.
1096 */
1097DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1098{
1099 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1100 AssertRC(rc);
1101}
1102
1103
1104/**
1105 * Reads the VM-entry exception error code field from the VMCS into
1106 * the VMX transient structure.
1107 *
1108 * @param pVCpu The cross context virtual CPU structure.
1109 * @param pVmxTransient The VMX-transient structure.
1110 */
1111DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1112{
1113 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1114 AssertRC(rc);
1115}
1116
1117
1118/**
1119 * Reads the VM-entry instruction length field from the VMCS into
1120 * the VMX transient structure.
1121 *
1122 * @param pVCpu The cross context virtual CPU structure.
1123 * @param pVmxTransient The VMX-transient structure.
1124 */
1125DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1126{
1127 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1128 AssertRC(rc);
1129}
1130
1131#endif /* VBOX_STRICT */
1132
1133
1134/**
1135 * Reads VMCS fields into the VMXTRANSIENT structure, slow path version.
1136 *
1137 * Don't call this directly unless it's likely that some or all of the fields
1138 * given in @a a_fReadMask have already been read.
1139 *
1140 * @tparam a_fReadMask The fields to read.
1141 * @param pVCpu The cross context virtual CPU structure.
1142 * @param pVmxTransient The VMX-transient structure.
1143 */
1144template<uint32_t const a_fReadMask>
1145static void vmxHCReadToTransientSlow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1146{
1147 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1148 | HMVMX_READ_EXIT_INSTR_LEN
1149 | HMVMX_READ_EXIT_INSTR_INFO
1150 | HMVMX_READ_IDT_VECTORING_INFO
1151 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1152 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1153 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1154 | HMVMX_READ_GUEST_LINEAR_ADDR
1155 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1156 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1157 )) == 0);
1158
1159 if ((pVmxTransient->fVmcsFieldsRead & a_fReadMask) != a_fReadMask)
1160 {
1161 uint32_t const fVmcsFieldsRead = pVmxTransient->fVmcsFieldsRead;
1162
1163 if ( (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1164 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1165 {
1166 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1167 AssertRC(rc);
1168 }
1169 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1170 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1171 {
1172 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1173 AssertRC(rc);
1174 }
1175 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1176 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1177 {
1178 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1179 AssertRC(rc);
1180 }
1181 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1182 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1183 {
1184 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1185 AssertRC(rc);
1186 }
1187 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1188 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1189 {
1190 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1191 AssertRC(rc);
1192 }
1193 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1194 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1195 {
1196 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1197 AssertRC(rc);
1198 }
1199 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1200 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1201 {
1202 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1203 AssertRC(rc);
1204 }
1205 if ( (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1206 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1207 {
1208 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1209 AssertRC(rc);
1210 }
1211 if ( (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1212 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1213 {
1214 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1215 AssertRC(rc);
1216 }
1217 if ( (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1218 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1219 {
1220 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1221 AssertRC(rc);
1222 }
1223
1224 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1225 }
1226}
1227
1228
1229/**
1230 * Reads VMCS fields into the VMXTRANSIENT structure.
1231 *
1232 * This optimizes for the case where none of @a a_fReadMask has been read yet,
1233 * generating an optimized read sequence w/o any conditionals in between in
1234 * non-strict builds.
1235 *
1236 * @tparam a_fReadMask The fields to read. One or more of the
1237 * HMVMX_READ_XXX fields ORed together.
1238 * @param pVCpu The cross context virtual CPU structure.
1239 * @param pVmxTransient The VMX-transient structure.
1240 */
1241template<uint32_t const a_fReadMask>
1242DECLINLINE(void) vmxHCReadToTransient(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1243{
1244 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1245 | HMVMX_READ_EXIT_INSTR_LEN
1246 | HMVMX_READ_EXIT_INSTR_INFO
1247 | HMVMX_READ_IDT_VECTORING_INFO
1248 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1249 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1250 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1251 | HMVMX_READ_GUEST_LINEAR_ADDR
1252 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1253 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1254 )) == 0);
1255
1256 if (RT_LIKELY(!(pVmxTransient->fVmcsFieldsRead & a_fReadMask)))
1257 {
1258 if (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1259 {
1260 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1261 AssertRC(rc);
1262 }
1263 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1264 {
1265 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1266 AssertRC(rc);
1267 }
1268 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1269 {
1270 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1271 AssertRC(rc);
1272 }
1273 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1274 {
1275 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1276 AssertRC(rc);
1277 }
1278 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1279 {
1280 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1281 AssertRC(rc);
1282 }
1283 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1284 {
1285 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1286 AssertRC(rc);
1287 }
1288 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1289 {
1290 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1291 AssertRC(rc);
1292 }
1293 if (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1294 {
1295 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1296 AssertRC(rc);
1297 }
1298 if (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1299 {
1300 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1301 AssertRC(rc);
1302 }
1303 if (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1304 {
1305 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1306 AssertRC(rc);
1307 }
1308
1309 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1310 }
1311 else
1312 {
1313 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatReadToTransientFallback);
1314 Log11Func(("a_fReadMask=%#x fVmcsFieldsRead=%#x => %#x - Taking inefficient code path!\n",
1315 a_fReadMask, pVmxTransient->fVmcsFieldsRead, a_fReadMask & pVmxTransient->fVmcsFieldsRead));
1316 vmxHCReadToTransientSlow<a_fReadMask>(pVCpu, pVmxTransient);
1317 }
1318}
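/*
 * Illustrative usage (assumed, not taken from this file): a VM-exit handler
 * instantiates the template with the HMVMX_READ_XXX flags it needs, letting the
 * compiler emit one condensed read sequence without intervening conditionals
 * (in non-strict builds). The flag combination below is just an example.
 */
#if 0
    vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
#endif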
1319
1320
1321#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1322/**
1323 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1324 *
1325 * @param pVCpu The cross context virtual CPU structure.
1326 * @param pVmxTransient The VMX-transient structure.
1327 */
1328static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1329{
1330 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1331 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1332 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1333 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1334 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1335 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1336 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1337 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1338 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1339 AssertRC(rc);
1340 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1341 | HMVMX_READ_EXIT_INSTR_LEN
1342 | HMVMX_READ_EXIT_INSTR_INFO
1343 | HMVMX_READ_IDT_VECTORING_INFO
1344 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1345 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1346 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1347 | HMVMX_READ_GUEST_LINEAR_ADDR
1348 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1349}
1350#endif
1351
1352/**
1353 * Verifies that our cached values of the VMCS fields are all consistent with
1354 * what's actually present in the VMCS.
1355 *
1356 * @returns VBox status code.
1357 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1358 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1359 * VMCS content. HMCPU error-field is
1360 * updated, see VMX_VCI_XXX.
1361 * @param pVCpu The cross context virtual CPU structure.
1362 * @param pVmcsInfo The VMCS info. object.
1363 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1364 */
1365static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1366{
1367 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1368
1369 uint32_t u32Val;
1370 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1371 AssertRC(rc);
1372 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1373 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1374 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1375 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1376
1377 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1378 AssertRC(rc);
1379 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1380 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1381 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1382 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1383
1384 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1385 AssertRC(rc);
1386 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1387 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1388 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1389 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1390
1391 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1392 AssertRC(rc);
1393 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1394 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1395 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1396 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1397
1398 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1399 {
1400 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1401 AssertRC(rc);
1402 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1403 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1404 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1405 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1406 }
1407
1408 uint64_t u64Val;
1409 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1410 {
1411 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1412 AssertRC(rc);
1413 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1414 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1415 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1416 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1417 }
1418
1419 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1420 AssertRC(rc);
1421 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1422 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1423 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1424 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1425
1426 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1427 AssertRC(rc);
1428 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1429 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1430 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1431 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1432
1433 NOREF(pcszVmcs);
1434 return VINF_SUCCESS;
1435}
1436
1437
1438/**
1439 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1440 * VMCS.
1441 *
1442 * This is typically required when the guest changes paging mode.
1443 *
1444 * @returns VBox status code.
1445 * @param pVCpu The cross context virtual CPU structure.
1446 * @param pVmxTransient The VMX-transient structure.
1447 *
1448 * @remarks Requires EFER.
1449 * @remarks No-long-jump zone!!!
1450 */
1451static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1452{
1453 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1454 {
1455 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1456 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1457
1458 /*
1459 * VM-entry controls.
1460 */
1461 {
1462 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1463 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1464
1465 /*
1466 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1467 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1468 *
1469 * For nested-guests, this is a mandatory VM-entry control. It's also
1470 * required because we do not want to leak host bits to the nested-guest.
1471 */
1472 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1473
1474 /*
1475 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1476 *
1477             * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1478 * required to get the nested-guest working with hardware-assisted VMX execution.
1479 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1480 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1481 * here rather than while merging the guest VMCS controls.
1482 */
1483 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1484 {
1485 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1486 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1487 }
1488 else
1489 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1490
1491 /*
1492 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1493 *
1494             * For nested-guests, we use the "load IA32_EFER" control if the hardware supports it,
1495 * regardless of whether the nested-guest VMCS specifies it because we are free to
1496 * load whatever MSRs we require and we do not need to modify the guest visible copy
1497 * of the VM-entry MSR load area.
1498 */
1499 if ( g_fHmVmxSupportsVmcsEfer
1500#ifndef IN_NEM_DARWIN
1501 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1502#endif
1503 )
1504 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1505 else
1506 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1507
1508 /*
1509 * The following should -not- be set (since we're not in SMM mode):
1510 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1511 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1512 */
1513
1514 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1515 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1516
1517 if ((fVal & fZap) == fVal)
1518 { /* likely */ }
1519 else
1520 {
1521 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1522 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1523 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1524 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1525 }
1526
1527 /* Commit it to the VMCS. */
1528 if (pVmcsInfo->u32EntryCtls != fVal)
1529 {
1530 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1531 AssertRC(rc);
1532 pVmcsInfo->u32EntryCtls = fVal;
1533 }
1534 }
1535
1536 /*
1537 * VM-exit controls.
1538 */
1539 {
1540 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1541 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1542
1543 /*
1544 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1545 * supported the 1-setting of this bit.
1546 *
1547             * For nested-guests, we set the "save debug controls" control since the converse
1548             * "load debug controls" control is mandatory for nested-guests anyway.
1549 */
1550 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1551
1552 /*
1553 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1554 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1555 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1556 * vmxHCExportHostMsrs().
1557 *
1558 * For nested-guests, we always set this bit as we do not support 32-bit
1559 * hosts.
1560 */
1561 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1562
1563#ifndef IN_NEM_DARWIN
1564 /*
1565 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1566 *
1567 * For nested-guests, we should use the "save IA32_EFER" control if we also
1568 * used the "load IA32_EFER" control while exporting VM-entry controls.
1569 */
1570 if ( g_fHmVmxSupportsVmcsEfer
1571 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1572 {
1573 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1574 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1575 }
1576#endif
1577
1578 /*
1579 * Enable saving of the VMX-preemption timer value on VM-exit.
1580 * For nested-guests, currently not exposed/used.
1581 */
1582 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1583 * the timer value. */
1584 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1585 {
1586 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1587 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1588 }
1589
1590 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1591 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1592
1593 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1594 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1595 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1596
1597 if ((fVal & fZap) == fVal)
1598 { /* likely */ }
1599 else
1600 {
1601 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1602 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1603 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1604 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1605 }
1606
1607 /* Commit it to the VMCS. */
1608 if (pVmcsInfo->u32ExitCtls != fVal)
1609 {
1610 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1611 AssertRC(rc);
1612 pVmcsInfo->u32ExitCtls = fVal;
1613 }
1614 }
1615
1616 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1617 }
1618 return VINF_SUCCESS;
1619}
1620
1621
1622/**
1623 * Sets the TPR threshold in the VMCS.
1624 *
1625 * @param pVCpu The cross context virtual CPU structure.
1626 * @param pVmcsInfo The VMCS info. object.
1627 * @param u32TprThreshold The TPR threshold (task-priority class only).
1628 */
1629DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1630{
1631 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1632 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1633 RT_NOREF(pVmcsInfo);
1634 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1635 AssertRC(rc);
1636}
1637
1638
1639/**
1640 * Exports the guest APIC TPR state into the VMCS.
1641 *
1642 * @param pVCpu The cross context virtual CPU structure.
1643 * @param pVmxTransient The VMX-transient structure.
1644 *
1645 * @remarks No-long-jump zone!!!
1646 */
1647static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1648{
1649 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1650 {
1651 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1652
1653 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1654 if (!pVmxTransient->fIsNestedGuest)
1655 {
1656 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1657 && APICIsEnabled(pVCpu))
1658 {
1659 /*
1660 * Setup TPR shadowing.
1661 */
1662 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1663 {
1664 bool fPendingIntr = false;
1665 uint8_t u8Tpr = 0;
1666 uint8_t u8PendingIntr = 0;
1667 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1668 AssertRC(rc);
1669
1670 /*
1671 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1672 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1673 * priority of the pending interrupt so we can deliver the interrupt. If there
1674 * are no interrupts pending, set threshold to 0 to not cause any
1675 * TPR-below-threshold VM-exits.
1676 */
1677 uint32_t u32TprThreshold = 0;
1678 if (fPendingIntr)
1679 {
1680 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1681 (which is the Task-Priority Class). */
1682 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1683 const uint8_t u8TprPriority = u8Tpr >> 4;
1684 if (u8PendingPriority <= u8TprPriority)
1685 u32TprThreshold = u8PendingPriority;
1686 }
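                    /* Worked example (illustrative): a pending vector of 0x51 (priority class 5) with the
                       guest TPR at 0x62 (class 6) yields a threshold of 5; once the guest lowers its TPR
                       below class 5 we get a TPR-below-threshold VM-exit and can deliver the interrupt. */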
1687
1688 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1689 }
1690 }
1691 }
1692 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1693 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1694 }
1695}
1696
1697
1698/**
1699 * Gets the guest interruptibility-state and updates related force-flags.
1700 *
1701 * @returns Guest's interruptibility-state.
1702 * @param pVCpu The cross context virtual CPU structure.
1703 *
1704 * @remarks No-long-jump zone!!!
1705 */
1706static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1707{
1708 uint32_t fIntrState;
1709
1710 /*
1711 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1712 */
1713 if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
1714 fIntrState = 0;
1715 else
1716 {
1717 /* If inhibition is active, RIP should've been imported from the VMCS already. */
1718 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1719
1720 if (CPUMIsInInterruptShadowAfterSs(&pVCpu->cpum.GstCtx))
1721 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1722 else
1723 {
1724 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1725
1726 /* Block-by-STI must not be set when interrupts are disabled. */
1727 AssertStmt(pVCpu->cpum.GstCtx.eflags.Bits.u1IF, fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
1728 }
1729 }
1730
1731 /*
1732 * Check if we should inhibit NMI delivery.
1733 */
1734 if (!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1735 { /* likely */ }
1736 else
1737 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1738
1739 /*
1740 * Validate.
1741 */
1742 /* We don't support block-by-SMI yet.*/
1743 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1744
1745 return fIntrState;
1746}
1747
1748
1749/**
1750 * Exports the exception intercepts required for guest execution in the VMCS.
1751 *
1752 * @param pVCpu The cross context virtual CPU structure.
1753 * @param pVmxTransient The VMX-transient structure.
1754 *
1755 * @remarks No-long-jump zone!!!
1756 */
1757static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1758{
1759 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1760 {
1761 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1762 if ( !pVmxTransient->fIsNestedGuest
1763 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1764 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1765 else
1766 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1767
1768 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1769 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1770 }
1771}
1772
1773
1774/**
1775 * Exports the guest's RIP into the guest-state area in the VMCS.
1776 *
1777 * @param pVCpu The cross context virtual CPU structure.
1778 *
1779 * @remarks No-long-jump zone!!!
1780 */
1781static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1782{
1783 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1784 {
1785 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1786
1787 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1788 AssertRC(rc);
1789
1790 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1791 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1792 }
1793}
1794
1795
1796/**
1797 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1798 *
1799 * @param pVCpu The cross context virtual CPU structure.
1800 * @param pVmxTransient The VMX-transient structure.
1801 *
1802 * @remarks No-long-jump zone!!!
1803 */
1804static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1805{
1806 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1807 {
1808 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1809
1810 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits
1811            of RFLAGS are reserved (MBZ). We use bits 63:24 for internal purposes, so there is no need
1812            to assert this; the CPUMX86EFLAGS/CPUMX86RFLAGS union masks these off for us.
1813 Use 32-bit VMWRITE. */
1814 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
1815 Assert((fEFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
1816 AssertMsg(!(fEFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK)), ("%#x\n", fEFlags));
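        /* (EFLAGS bit 1 is architecturally reserved and always reads as 1; that is the bit
           X86_EFL_RA1_MASK covers in the assertions above.) */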
1817
1818#ifndef IN_NEM_DARWIN
1819 /*
1820 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1821 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1822 * can run the real-mode guest code under Virtual 8086 mode.
1823 */
1824 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1825 if (pVmcsInfo->RealMode.fRealOnV86Active)
1826 {
1827 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1828 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1829 Assert(!pVmxTransient->fIsNestedGuest);
1830 pVmcsInfo->RealMode.Eflags.u32 = fEFlags; /* Save the original eflags of the real-mode guest. */
1831 fEFlags |= X86_EFL_VM; /* Set the Virtual 8086 mode bit. */
1832 fEFlags &= ~X86_EFL_IOPL; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1833 }
1834#else
1835 RT_NOREF(pVmxTransient);
1836#endif
1837
1838 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags);
1839 AssertRC(rc);
1840
1841 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1842 Log4Func(("eflags=%#RX32\n", fEFlags));
1843 }
1844}
1845
1846
1847#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1848/**
1849 * Copies the nested-guest VMCS to the shadow VMCS.
1850 *
1851 * @returns VBox status code.
1852 * @param pVCpu The cross context virtual CPU structure.
1853 * @param pVmcsInfo The VMCS info. object.
1854 *
1855 * @remarks No-long-jump zone!!!
1856 */
1857static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1858{
1859 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1860 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1861
1862 /*
1863 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1864 * current VMCS, as we may try saving guest lazy MSRs.
1865 *
1866 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1867 * calling the import VMCS code which is currently performing the guest MSR reads
1868 * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
1869 * and the rest of the VMX leave session machinery.
1870 */
1871 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1872
1873 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1874 if (RT_SUCCESS(rc))
1875 {
1876 /*
1877 * Copy all guest read/write VMCS fields.
1878 *
1879 * We don't check for VMWRITE failures here for performance reasons and
1880 * because they are not expected to fail, barring irrecoverable conditions
1881 * like hardware errors.
1882 */
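        /* Note: we use the 64-bit write for fields of every width; VMWRITE is expected to ignore
           source bits beyond the destination field's width. */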
1883 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1884 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1885 {
1886 uint64_t u64Val;
1887 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1888 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1889 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1890 }
1891
1892 /*
1893 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1894 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1895 */
1896 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1897 {
1898 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1899 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1900 {
1901 uint64_t u64Val;
1902 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1903 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1904 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1905 }
1906 }
1907
1908 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1909 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1910 }
1911
1912 ASMSetFlags(fEFlags);
1913 return rc;
1914}
1915
1916
1917/**
1918 * Copies the shadow VMCS to the nested-guest VMCS.
1919 *
1920 * @returns VBox status code.
1921 * @param pVCpu The cross context virtual CPU structure.
1922 * @param pVmcsInfo The VMCS info. object.
1923 *
1924 * @remarks Called with interrupts disabled.
1925 */
1926static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1927{
1928 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1929 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1930 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1931
1932 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1933 if (RT_SUCCESS(rc))
1934 {
1935 /*
1936 * Copy guest read/write fields from the shadow VMCS.
1937 * Guest read-only fields cannot be modified, so no need to copy them.
1938 *
1939 * We don't check for VMREAD failures here for performance reasons and
1940 * because they are not expected to fail, barring irrecoverable conditions
1941 * like hardware errors.
1942 */
1943 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1944 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1945 {
1946 uint64_t u64Val;
1947 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1948 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1949 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1950 }
1951
1952 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1953 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1954 }
1955 return rc;
1956}
1957
1958
1959/**
1960 * Enables VMCS shadowing for the given VMCS info. object.
1961 *
1962 * @param pVCpu The cross context virtual CPU structure.
1963 * @param pVmcsInfo The VMCS info. object.
1964 *
1965 * @remarks No-long-jump zone!!!
1966 */
1967static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1968{
1969 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1970 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1971 {
1972 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1973 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1974 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1975 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1976 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1977 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
1978 Log4Func(("Enabled\n"));
1979 }
1980}
1981
1982
1983/**
1984 * Disables VMCS shadowing for the given VMCS info. object.
1985 *
1986 * @param pVCpu The cross context virtual CPU structure.
1987 * @param pVmcsInfo The VMCS info. object.
1988 *
1989 * @remarks No-long-jump zone!!!
1990 */
1991static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1992{
1993 /*
1994 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
1995 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
1996 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
1997 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
1998 *
1999 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2000 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2001 */
2002 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2003 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2004 {
2005 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2006 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2007 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2008 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2009 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2010 Log4Func(("Disabled\n"));
2011 }
2012}
2013#endif
2014
2015
2016/**
2017 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2018 *
2019 * The guest FPU state is always pre-loaded, hence we don't need to bother about
2020 * sharing FPU-related CR0 bits between the guest and host.
2021 *
2022 * @returns VBox status code.
2023 * @param pVCpu The cross context virtual CPU structure.
2024 * @param pVmxTransient The VMX-transient structure.
2025 *
2026 * @remarks No-long-jump zone!!!
2027 */
2028static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2029{
2030 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2031 {
2032 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2033 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2034
2035 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2036 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2037 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2038 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2039 else
2040 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
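        /* Illustrative values: on typical VT-x hardware IA32_VMX_CR0_FIXED0 is 0x80000021 (PG, NE, PE)
           and IA32_VMX_CR0_FIXED1 is 0xffffffff, so unrestricted guest execution only relaxes the
           PE/PG requirements while NE remains mandatory. */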
2041
2042 if (!pVmxTransient->fIsNestedGuest)
2043 {
2044 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2045 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2046 uint64_t const u64ShadowCr0 = u64GuestCr0;
2047 Assert(!RT_HI_U32(u64GuestCr0));
2048
2049 /*
2050 * Setup VT-x's view of the guest CR0.
2051 */
2052 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2053 if (VM_IS_VMX_NESTED_PAGING(pVM))
2054 {
2055#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2056 if (CPUMIsGuestPagingEnabled(pVCpu))
2057 {
2058 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2059 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2060 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2061 }
2062 else
2063 {
2064 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2065 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2066 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2067 }
2068
2069 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2070 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2071 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2072#endif
2073 }
2074 else
2075 {
2076 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2077 u64GuestCr0 |= X86_CR0_WP;
2078 }
2079
2080 /*
2081 * Guest FPU bits.
2082 *
2083             * Since we always pre-load the guest FPU before VM-entry, there is no need to track lazy state
2084             * using CR0.TS.
2085             *
2086             * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2087             * set on the first CPUs to support VT-x, with no mention of it with regard to UX in the VM-entry checks.
2088 */
2089 u64GuestCr0 |= X86_CR0_NE;
2090
2091 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2092 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2093
2094 /*
2095 * Update exception intercepts.
2096 */
2097 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2098#ifndef IN_NEM_DARWIN
2099 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2100 {
2101 Assert(PDMVmmDevHeapIsEnabled(pVM));
2102 Assert(pVM->hm.s.vmx.pRealModeTSS);
2103 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2104 }
2105 else
2106#endif
2107 {
2108 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2109 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2110 if (fInterceptMF)
2111 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2112 }
2113
2114 /* Additional intercepts for debugging, define these yourself explicitly. */
2115#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2116 uXcptBitmap |= 0
2117 | RT_BIT(X86_XCPT_BP)
2118 | RT_BIT(X86_XCPT_DE)
2119 | RT_BIT(X86_XCPT_NM)
2120 | RT_BIT(X86_XCPT_TS)
2121 | RT_BIT(X86_XCPT_UD)
2122 | RT_BIT(X86_XCPT_NP)
2123 | RT_BIT(X86_XCPT_SS)
2124 | RT_BIT(X86_XCPT_GP)
2125 | RT_BIT(X86_XCPT_PF)
2126 | RT_BIT(X86_XCPT_MF)
2127 ;
2128#elif defined(HMVMX_ALWAYS_TRAP_PF)
2129 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2130#endif
2131 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2132 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2133 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2134 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2135 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2136
2137 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2138 u64GuestCr0 |= fSetCr0;
2139 u64GuestCr0 &= fZapCr0;
2140 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2141
2142 Assert(!RT_HI_U32(u64GuestCr0));
2143 Assert(u64GuestCr0 & X86_CR0_NE);
2144
2145 /* Commit the CR0 and related fields to the guest VMCS. */
2146 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2147 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2148 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2149 {
2150 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2151 AssertRC(rc);
2152 }
2153 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2154 {
2155 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2156 AssertRC(rc);
2157 }
2158
2159 /* Update our caches. */
2160 pVmcsInfo->u32ProcCtls = uProcCtls;
2161 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2162
2163 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2164 }
2165 else
2166 {
2167 /*
2168 * With nested-guests, we may have extended the guest/host mask here since we
2169 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2170 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2171 * originally supplied. We must copy those bits from the nested-guest CR0 into
2172 * the nested-guest CR0 read-shadow.
2173 */
2174 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2175 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2176 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2177
2178 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2179 u64GuestCr0 |= fSetCr0;
2180 u64GuestCr0 &= fZapCr0;
2181 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2182
2183 Assert(!RT_HI_U32(u64GuestCr0));
2184 Assert(u64GuestCr0 & X86_CR0_NE);
2185
2186 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2187 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2188 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2189
2190 Log4Func(("cr0=%#RX64 shadow=%#RX64 vmcs_read_shw=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0,
2191 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u, fSetCr0, fZapCr0));
2192 }
2193
2194 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2195 }
2196
2197 return VINF_SUCCESS;
2198}
2199
2200
2201/**
2202 * Exports the guest control registers (CR3, CR4) into the guest-state area
2203 * in the VMCS.
2204 *
2205 * @returns VBox strict status code.
2206 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2207 * without unrestricted guest access and the VMMDev is not presently
2208 * mapped (e.g. EFI32).
2209 *
2210 * @param pVCpu The cross context virtual CPU structure.
2211 * @param pVmxTransient The VMX-transient structure.
2212 *
2213 * @remarks No-long-jump zone!!!
2214 */
2215static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2216{
2217 int rc = VINF_SUCCESS;
2218 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2219
2220 /*
2221 * Guest CR2.
2222 * It's always loaded in the assembler code. Nothing to do here.
2223 */
2224
2225 /*
2226 * Guest CR3.
2227 */
2228 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2229 {
2230 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2231
2232 if (VM_IS_VMX_NESTED_PAGING(pVM))
2233 {
2234#ifndef IN_NEM_DARWIN
2235 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2236 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2237
2238 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2239 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2240 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2241 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2242
2243 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2244 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2245 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
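            /* Illustrative example: a PML4 table at host-physical 0x0000000123456000 yields an EPTP of
               0x000000012345601e: memory type WB (6) in bits 2:0, page-walk length field 3 (i.e. a
               4-level walk) in bits 5:3, and the accessed/dirty enable bit 6 left clear. */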
2246
2247 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2248             AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 5:3 (EPT page walk length - 1) must be 3. */
2249                       && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 11:7 MBZ. */
2250 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2251 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2252 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2253 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2254
2255 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2256 AssertRC(rc);
2257#endif
2258
2259 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2260 uint64_t u64GuestCr3 = pCtx->cr3;
2261 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2262 || CPUMIsGuestPagingEnabledEx(pCtx))
2263 {
2264 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2265 if (CPUMIsGuestInPAEModeEx(pCtx))
2266 {
2267 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2268 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2269 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2270 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2271 }
2272
2273 /*
2274 * The guest's view of its CR3 is unblemished with nested paging when the
2275 * guest is using paging or we have unrestricted guest execution to handle
2276 * the guest when it's not using paging.
2277 */
2278 }
2279#ifndef IN_NEM_DARWIN
2280 else
2281 {
2282 /*
2283 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2284 * thinks it accesses physical memory directly, we use our identity-mapped
2285 * page table to map guest-linear to guest-physical addresses. EPT takes care
2286 * of translating it to host-physical addresses.
2287 */
2288 RTGCPHYS GCPhys;
2289 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2290
2291 /* We obtain it here every time as the guest could have relocated this PCI region. */
2292 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2293 if (RT_SUCCESS(rc))
2294 { /* likely */ }
2295 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2296 {
2297 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2298 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2299 }
2300 else
2301 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2302
2303 u64GuestCr3 = GCPhys;
2304 }
2305#endif
2306
2307 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2308 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2309 AssertRC(rc);
2310 }
2311 else
2312 {
2313 Assert(!pVmxTransient->fIsNestedGuest);
2314 /* Non-nested paging case, just use the hypervisor's CR3. */
2315 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2316
2317 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2318 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2319 AssertRC(rc);
2320 }
2321
2322 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2323 }
2324
2325 /*
2326 * Guest CR4.
2327     * ASSUMES this is done every time we get in from ring-3! (XCR0)
2328 */
2329 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2330 {
2331 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2332 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2333
2334 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2335 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
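        /* Typically (illustrative value) IA32_VMX_CR4_FIXED0 is 0x2000, i.e. only CR4.VMXE is forced
           to 1 while in VMX non-root operation; the FIXED1 value varies with the CPU's feature set. */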
2336
2337 /*
2338 * With nested-guests, we may have extended the guest/host mask here (since we
2339 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
2340 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2341 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2342 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2343 */
2344 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2345 uint64_t u64GuestCr4 = pCtx->cr4;
2346 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2347 ? pCtx->cr4
2348 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2349 Assert(!RT_HI_U32(u64GuestCr4));
2350
2351#ifndef IN_NEM_DARWIN
2352 /*
2353 * Setup VT-x's view of the guest CR4.
2354 *
2355 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2356 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2357 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2358 *
2359 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2360 */
2361 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2362 {
2363 Assert(pVM->hm.s.vmx.pRealModeTSS);
2364 Assert(PDMVmmDevHeapIsEnabled(pVM));
2365 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2366 }
2367#endif
2368
2369 if (VM_IS_VMX_NESTED_PAGING(pVM))
2370 {
2371 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2372 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2373 {
2374 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2375 u64GuestCr4 |= X86_CR4_PSE;
2376 /* Our identity mapping is a 32-bit page directory. */
2377 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2378 }
2379 /* else use guest CR4.*/
2380 }
2381 else
2382 {
2383 Assert(!pVmxTransient->fIsNestedGuest);
2384
2385 /*
2386 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2387 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2388 */
2389 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2390 {
2391 case PGMMODE_REAL: /* Real-mode. */
2392 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2393 case PGMMODE_32_BIT: /* 32-bit paging. */
2394 {
2395 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2396 break;
2397 }
2398
2399 case PGMMODE_PAE: /* PAE paging. */
2400 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2401 {
2402 u64GuestCr4 |= X86_CR4_PAE;
2403 break;
2404 }
2405
2406 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2407 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2408 {
2409#ifdef VBOX_WITH_64_BITS_GUESTS
2410 /* For our assumption in vmxHCShouldSwapEferMsr. */
2411 Assert(u64GuestCr4 & X86_CR4_PAE);
2412 break;
2413#endif
2414 }
2415 default:
2416 AssertFailed();
2417 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2418 }
2419 }
2420
2421 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2422 u64GuestCr4 |= fSetCr4;
2423 u64GuestCr4 &= fZapCr4;
2424
2425 Assert(!RT_HI_U32(u64GuestCr4));
2426 Assert(u64GuestCr4 & X86_CR4_VMXE);
2427
2428 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2429 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2430 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2431
2432#ifndef IN_NEM_DARWIN
2433 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2434 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2435 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2436 {
2437 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2438 hmR0VmxUpdateStartVmFunction(pVCpu);
2439 }
2440#endif
2441
2442 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2443
2444 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2445 }
2446 return rc;
2447}
2448
2449
2450#ifdef VBOX_STRICT
2451/**
2452 * Strict function to validate segment registers.
2453 *
2454 * @param pVCpu The cross context virtual CPU structure.
2455 * @param pVmcsInfo The VMCS info. object.
2456 *
2457 * @remarks Will import guest CR0 on strict builds during validation of
2458 * segments.
2459 */
2460static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2461{
2462 /*
2463 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2464 *
2465 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2466 * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2467 * unusable bit and doesn't change the guest-context value.
2468 */
2469 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2470 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2471 vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2472 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2473 && ( !CPUMIsGuestInRealModeEx(pCtx)
2474 && !CPUMIsGuestInV86ModeEx(pCtx)))
2475 {
2476 /* Protected mode checks */
2477 /* CS */
2478 Assert(pCtx->cs.Attr.n.u1Present);
2479 Assert(!(pCtx->cs.Attr.u & 0xf00));
2480 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2481 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2482 || !(pCtx->cs.Attr.n.u1Granularity));
2483 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2484 || (pCtx->cs.Attr.n.u1Granularity));
2485 /* CS cannot be loaded with NULL in protected mode. */
2486 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2487 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2488 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2489 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2490 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2491 else
2492             AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2493 /* SS */
2494 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2495 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2496 if ( !(pCtx->cr0 & X86_CR0_PE)
2497 || pCtx->cs.Attr.n.u4Type == 3)
2498 {
2499 Assert(!pCtx->ss.Attr.n.u2Dpl);
2500 }
2501 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2502 {
2503 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2504 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2505 Assert(pCtx->ss.Attr.n.u1Present);
2506 Assert(!(pCtx->ss.Attr.u & 0xf00));
2507 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2508 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2509 || !(pCtx->ss.Attr.n.u1Granularity));
2510 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2511 || (pCtx->ss.Attr.n.u1Granularity));
2512 }
2513 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2514 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2515 {
2516 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2517 Assert(pCtx->ds.Attr.n.u1Present);
2518 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2519 Assert(!(pCtx->ds.Attr.u & 0xf00));
2520 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2521 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2522 || !(pCtx->ds.Attr.n.u1Granularity));
2523 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2524 || (pCtx->ds.Attr.n.u1Granularity));
2525 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2526 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2527 }
2528 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2529 {
2530 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2531 Assert(pCtx->es.Attr.n.u1Present);
2532 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2533 Assert(!(pCtx->es.Attr.u & 0xf00));
2534 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2535 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2536 || !(pCtx->es.Attr.n.u1Granularity));
2537 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2538 || (pCtx->es.Attr.n.u1Granularity));
2539 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2540 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2541 }
2542 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2543 {
2544 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2545 Assert(pCtx->fs.Attr.n.u1Present);
2546 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2547 Assert(!(pCtx->fs.Attr.u & 0xf00));
2548 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2549 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2550 || !(pCtx->fs.Attr.n.u1Granularity));
2551 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2552 || (pCtx->fs.Attr.n.u1Granularity));
2553 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2554 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2555 }
2556 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2557 {
2558 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2559 Assert(pCtx->gs.Attr.n.u1Present);
2560 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2561 Assert(!(pCtx->gs.Attr.u & 0xf00));
2562 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2563 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2564 || !(pCtx->gs.Attr.n.u1Granularity));
2565 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2566 || (pCtx->gs.Attr.n.u1Granularity));
2567 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2568 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2569 }
2570 /* 64-bit capable CPUs. */
2571 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2572 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2573 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2574 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2575 }
2576 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2577 || ( CPUMIsGuestInRealModeEx(pCtx)
2578 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2579 {
2580 /* Real and v86 mode checks. */
2581         /* vmxHCExportGuestSegReg() writes the modified attributes into the VMCS. We want what we're feeding to VT-x. */
2582 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2583#ifndef IN_NEM_DARWIN
2584 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2585 {
2586 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2587 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2588 }
2589 else
2590#endif
2591 {
2592 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2593 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2594 }
2595
2596 /* CS */
2597 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2598 Assert(pCtx->cs.u32Limit == 0xffff);
2599 AssertMsg(u32CSAttr == 0xf3, ("cs=%#x %#x ", pCtx->cs.Sel, u32CSAttr));
2600 /* SS */
2601 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2602 Assert(pCtx->ss.u32Limit == 0xffff);
2603 Assert(u32SSAttr == 0xf3);
2604 /* DS */
2605 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2606 Assert(pCtx->ds.u32Limit == 0xffff);
2607 Assert(u32DSAttr == 0xf3);
2608 /* ES */
2609 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2610 Assert(pCtx->es.u32Limit == 0xffff);
2611 Assert(u32ESAttr == 0xf3);
2612 /* FS */
2613 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2614 Assert(pCtx->fs.u32Limit == 0xffff);
2615 Assert(u32FSAttr == 0xf3);
2616 /* GS */
2617 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2618 Assert(pCtx->gs.u32Limit == 0xffff);
2619 Assert(u32GSAttr == 0xf3);
2620 /* 64-bit capable CPUs. */
2621 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2622 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2623 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2624 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2625 }
2626}
2627#endif /* VBOX_STRICT */
2628
2629
2630/**
2631 * Exports a guest segment register into the guest-state area in the VMCS.
2632 *
2633 * @returns VBox status code.
2634 * @param pVCpu The cross context virtual CPU structure.
2635 * @param pVmcsInfo The VMCS info. object.
2636 * @param iSegReg The segment register number (X86_SREG_XXX).
2637 * @param pSelReg Pointer to the segment selector.
2638 *
2639 * @remarks No-long-jump zone!!!
2640 */
2641static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2642{
2643 Assert(iSegReg < X86_SREG_COUNT);
2644
2645 uint32_t u32Access = pSelReg->Attr.u;
2646#ifndef IN_NEM_DARWIN
2647 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2648#endif
2649 {
2650         * The way to tell whether this is really a null selector or just a selector that was
2651         * loaded with 0 in real-mode is by the segment attributes. A selector
2652         * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
2653         * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure
2654         * NULL selectors loaded in protected-mode have their attributes set to 0.
2655 * NULL selectors loaded in protected-mode have their attribute as 0.
2656 */
2657 if (u32Access)
2658 { }
2659 else
2660 u32Access = X86DESCATTR_UNUSABLE;
2661 }
2662#ifndef IN_NEM_DARWIN
2663 else
2664 {
2665 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2666 u32Access = 0xf3;
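        /* 0xf3 decodes to: type 3 (read/write, accessed data), S=1, DPL=3, present; these are the
           access rights the VM-entry checks expect for all segments in virtual-8086 mode. */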
2667 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2668 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2669 RT_NOREF_PV(pVCpu);
2670 }
2671#else
2672 RT_NOREF(pVmcsInfo);
2673#endif
2674
2675 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2676 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2677               ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2678
2679 /*
2680 * Commit it to the VMCS.
2681 */
2682 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2683 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2684 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2685 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2686 return VINF_SUCCESS;
2687}
2688
2689
2690/**
2691 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2692 * area in the VMCS.
2693 *
2694 * @returns VBox status code.
2695 * @param pVCpu The cross context virtual CPU structure.
2696 * @param pVmxTransient The VMX-transient structure.
2697 *
2698 * @remarks Will import guest CR0 on strict builds during validation of
2699 * segments.
2700 * @remarks No-long-jump zone!!!
2701 */
2702static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2703{
2704 int rc = VERR_INTERNAL_ERROR_5;
2705#ifndef IN_NEM_DARWIN
2706 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2707#endif
2708 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2709 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2710#ifndef IN_NEM_DARWIN
2711 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2712#endif
2713
2714 /*
2715 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2716 */
2717 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2718 {
2719 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2720 {
2721 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2722#ifndef IN_NEM_DARWIN
2723 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2724 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2725#endif
2726 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2727 AssertRC(rc);
2728 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2729 }
2730
2731 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2732 {
2733 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2734#ifndef IN_NEM_DARWIN
2735 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2736 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2737#endif
2738 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2739 AssertRC(rc);
2740 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2741 }
2742
2743 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2744 {
2745 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2746#ifndef IN_NEM_DARWIN
2747 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2748 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2749#endif
2750 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2751 AssertRC(rc);
2752 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2753 }
2754
2755 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2756 {
2757 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2758#ifndef IN_NEM_DARWIN
2759 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2760 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2761#endif
2762 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2763 AssertRC(rc);
2764 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2765 }
2766
2767 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2768 {
2769 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2770#ifndef IN_NEM_DARWIN
2771 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2772 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2773#endif
2774 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2775 AssertRC(rc);
2776 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2777 }
2778
2779 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2780 {
2781 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2782#ifndef IN_NEM_DARWIN
2783 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2784 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2785#endif
2786 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2787 AssertRC(rc);
2788 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2789 }
2790
2791#ifdef VBOX_STRICT
2792 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2793#endif
2794 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2795 pCtx->cs.Attr.u));
2796 }
2797
2798 /*
2799 * Guest TR.
2800 */
2801 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2802 {
2803 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2804
2805 /*
2806 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2807 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2808 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2809 */
2810 uint16_t u16Sel;
2811 uint32_t u32Limit;
2812 uint64_t u64Base;
2813 uint32_t u32AccessRights;
2814#ifndef IN_NEM_DARWIN
2815 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2816#endif
2817 {
2818 u16Sel = pCtx->tr.Sel;
2819 u32Limit = pCtx->tr.u32Limit;
2820 u64Base = pCtx->tr.u64Base;
2821 u32AccessRights = pCtx->tr.Attr.u;
2822 }
2823#ifndef IN_NEM_DARWIN
2824 else
2825 {
2826 Assert(!pVmxTransient->fIsNestedGuest);
2827 Assert(pVM->hm.s.vmx.pRealModeTSS);
2828 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2829
2830 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2831 RTGCPHYS GCPhys;
2832 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2833 AssertRCReturn(rc, rc);
2834
2835 X86DESCATTR DescAttr;
2836 DescAttr.u = 0;
2837 DescAttr.n.u1Present = 1;
2838 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2839
2840 u16Sel = 0;
2841 u32Limit = HM_VTX_TSS_SIZE;
2842 u64Base = GCPhys;
2843 u32AccessRights = DescAttr.u;
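            /* Illustrative: this composes access rights of 0x8b, i.e. a present, busy 32-bit TSS descriptor. */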
2844 }
2845#endif
2846
2847 /* Validate. */
2848 Assert(!(u16Sel & RT_BIT(2)));
2849 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2850 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2851 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2852 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2853 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2854 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2855 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2856 Assert( (u32Limit & 0xfff) == 0xfff
2857 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2858 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2859 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2860
2861 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2862 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2863 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2864 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2865
2866 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2867 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2868 }
2869
2870 /*
2871 * Guest GDTR.
2872 */
2873 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2874 {
2875 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2876
2877 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2878 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2879
2880 /* Validate. */
2881 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2882
2883 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2884 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2885 }
2886
2887 /*
2888 * Guest LDTR.
2889 */
2890 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2891 {
2892 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2893
2894         /* The unusable bit is specific to VT-x; if it's a null selector, mark it as an unusable segment. */
2895 uint32_t u32Access;
2896 if ( !pVmxTransient->fIsNestedGuest
2897 && !pCtx->ldtr.Attr.u)
2898 u32Access = X86DESCATTR_UNUSABLE;
2899 else
2900 u32Access = pCtx->ldtr.Attr.u;
2901
2902 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2903 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2904 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2905 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2906
2907 /* Validate. */
2908 if (!(u32Access & X86DESCATTR_UNUSABLE))
2909 {
2910 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2911 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2912 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2913 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2914 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2915 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2916 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2917 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2918 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2919 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2920 }
2921
2922 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2923 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2924 }
2925
2926 /*
2927 * Guest IDTR.
2928 */
2929 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2930 {
2931 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2932
2933 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2934 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2935
2936 /* Validate. */
2937 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2938
2939 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2940 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2941 }
2942
2943 return VINF_SUCCESS;
2944}
2945
2946
2947/**
2948 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2949 * VM-exit interruption info type.
2950 *
2951 * @returns The IEM exception flags.
2952 * @param uVector The event vector.
2953 * @param uVmxEventType The VMX event type.
2954 *
2955 * @remarks This function currently only constructs flags required for
2956 *          IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2957 * and CR2 aspects of an exception are not included).
2958 */
2959static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2960{
2961 uint32_t fIemXcptFlags;
2962 switch (uVmxEventType)
2963 {
2964 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2965 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2966 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2967 break;
2968
2969 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2970 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2971 break;
2972
2973 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2974 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2975 break;
2976
2977 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2978 {
2979 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2980 if (uVector == X86_XCPT_BP)
2981 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2982 else if (uVector == X86_XCPT_OF)
2983 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
2984 else
2985 {
2986 fIemXcptFlags = 0;
2987 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
2988 }
2989 break;
2990 }
2991
2992 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
2993 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2994 break;
2995
2996 default:
2997 fIemXcptFlags = 0;
2998 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
2999 break;
3000 }
3001 return fIemXcptFlags;
3002}
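/*
 * Rough usage sketch for the helper above (illustrative only; the constants are assumed to
 * behave exactly as in the switch):
 *
 *     uint32_t const fFlags = vmxHCGetIemXcptFlags(X86_XCPT_BP, VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT);
 *     Assert(fFlags == (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR));
 */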
3003
3004
3005/**
3006 * Sets an event as a pending event to be injected into the guest.
3007 *
3008 * @param pVCpu The cross context virtual CPU structure.
3009 * @param u32IntInfo The VM-entry interruption-information field.
3010 * @param cbInstr The VM-entry instruction length in bytes (for
3011 * software interrupts, exceptions and privileged
3012 * software exceptions).
3013 * @param u32ErrCode The VM-entry exception error code.
3014 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3015 * page-fault.
3016 */
3017DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3018 RTGCUINTPTR GCPtrFaultAddress)
3019{
3020 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3021 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3022 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3023 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3024 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3025 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3026}
3027
3028
3029/**
3030 * Sets an external interrupt as pending-for-injection into the VM.
3031 *
3032 * @param pVCpu The cross context virtual CPU structure.
3033 * @param u8Interrupt The external interrupt vector.
3034 */
3035DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3036{
3037 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3038 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3039 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3040 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3041 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3042}
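/*
 * Note (an observation about the code above, not a statement taken from the Intel spec text):
 * the vector is composed with VMX_BF_EXIT_INT_INFO_VECTOR while the remaining fields use the
 * VMX_BF_ENTRY_INT_INFO_* macros, and some helpers below likewise use VMX_EXIT_INT_INFO_TYPE_*
 * constants in the entry field. This mixing appears harmless because the VM-entry and VM-exit
 * interruption-information formats keep the vector and type in the same bit positions, so the
 * resulting encoding is identical either way.
 */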
3043
3044
3045/**
3046 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3047 *
3048 * @param pVCpu The cross context virtual CPU structure.
3049 */
3050DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3051{
3052 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3053 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3054 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3055 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3056 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3057}
3058
3059
3060/**
3061 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3062 *
3063 * @param pVCpu The cross context virtual CPU structure.
3064 */
3065DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3066{
3067 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3068 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3069 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3070 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3071 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3072}
3073
3074
3075/**
3076 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3077 *
3078 * @param pVCpu The cross context virtual CPU structure.
3079 */
3080DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3081{
3082 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3083 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3084 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3085 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3086 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3087}
3088
3089
3090/**
3091 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3092 *
3093 * @param pVCpu The cross context virtual CPU structure.
3094 */
3095DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3096{
3097 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3098 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3099 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3100 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3101 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3102}
3103
3104
3105#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3106/**
3107 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3108 *
3109 * @param pVCpu The cross context virtual CPU structure.
3110 * @param u32ErrCode The error code for the general-protection exception.
3111 */
3112DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3113{
3114 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3115 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3116 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3117 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3118 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3119}
3120
3121
3122/**
3123 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3124 *
3125 * @param pVCpu The cross context virtual CPU structure.
3126 * @param u32ErrCode The error code for the stack exception.
3127 */
3128DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3129{
3130 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3131 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3132 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3133 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3134 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3135}
3136#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3137
3138
3139/**
3140 * Fixes up attributes for the specified segment register.
3141 *
3142 * @param pVCpu The cross context virtual CPU structure.
3143 * @param pSelReg The segment register that needs fixing.
3144 * @param pszRegName The register name (for logging and assertions).
3145 */
3146static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3147{
3148 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3149
3150 /*
3151 * If VT-x marks the segment as unusable, most other bits remain undefined:
3152 * - For CS the L, D and G bits have meaning.
3153 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3154 * - For the remaining data segments no bits are defined.
3155 *
3156 * The present bit and the unusable bit have been observed to be set at the
3157 * same time (the selector was supposed to be invalid as we started executing
3158 * a V8086 interrupt in ring-0).
3159 *
3160 * What is important for the rest of the VBox code is that the P bit is
3161 * cleared. Some of the other VBox code recognizes the unusable bit, but
3162 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3163 * safe side here, we'll strip off P and other bits we don't care about. If
3164 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3165 *
3166 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3167 */
3168#ifdef VBOX_STRICT
3169 uint32_t const uAttr = pSelReg->Attr.u;
3170#endif
3171
3172 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3173 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3174 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3175
3176#ifdef VBOX_STRICT
3177# ifndef IN_NEM_DARWIN
3178 VMMRZCallRing3Disable(pVCpu);
3179# endif
3180 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3181# ifdef DEBUG_bird
3182 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3183 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3184 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3185# endif
3186# ifndef IN_NEM_DARWIN
3187 VMMRZCallRing3Enable(pVCpu);
3188# endif
3189 NOREF(uAttr);
3190#endif
3191 RT_NOREF2(pVCpu, pszRegName);
3192}
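/*
 * Illustrative example (scenario assumed, not taken from a trace): if VT-x returns an unusable
 * SS whose attributes still have P set, the masking above keeps only the unusable flag plus the
 * DPL/type/S/L/D/G bits and drops P, so any code that keys off Attr.n.u1Present treats the
 * register as not loaded -- which is the point of the stripping described in the comment above.
 */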
3193
3194
3195/**
3196 * Imports a guest segment register from the current VMCS into the guest-CPU
3197 * context.
3198 *
3199 * @param pVCpu The cross context virtual CPU structure.
3200 * @tparam a_iSegReg The segment register number (X86_SREG_XXX).
3201 *
3202 * @remarks Called with interrupts and/or preemption disabled.
3203 */
3204template<uint32_t const a_iSegReg>
3205DECLINLINE(void) vmxHCImportGuestSegReg(PVMCPUCC pVCpu)
3206{
3207 AssertCompile(a_iSegReg < X86_SREG_COUNT);
3208    /* Check that the macros we depend upon here and in the exporting parent function work: */
3209#define MY_SEG_VMCS_FIELD(a_FieldPrefix, a_FieldSuff) \
3210 ( a_iSegReg == X86_SREG_ES ? a_FieldPrefix ## ES ## a_FieldSuff \
3211 : a_iSegReg == X86_SREG_CS ? a_FieldPrefix ## CS ## a_FieldSuff \
3212 : a_iSegReg == X86_SREG_SS ? a_FieldPrefix ## SS ## a_FieldSuff \
3213 : a_iSegReg == X86_SREG_DS ? a_FieldPrefix ## DS ## a_FieldSuff \
3214 : a_iSegReg == X86_SREG_FS ? a_FieldPrefix ## FS ## a_FieldSuff \
3215 : a_iSegReg == X86_SREG_GS ? a_FieldPrefix ## GS ## a_FieldSuff : 0)
3216 AssertCompile(VMX_VMCS_GUEST_SEG_BASE(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS_GUEST_,_BASE));
3217 AssertCompile(VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS16_GUEST_,_SEL));
3218 AssertCompile(VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_LIMIT));
3219 AssertCompile(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_ACCESS_RIGHTS));
3220
3221 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[a_iSegReg];
3222
3223 uint16_t u16Sel;
3224 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg), &u16Sel); AssertRC(rc);
3225 pSelReg->Sel = u16Sel;
3226 pSelReg->ValidSel = u16Sel;
3227
3228 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3229 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(a_iSegReg), &pSelReg->u64Base); AssertRC(rc);
3230
3231 uint32_t u32Attr;
3232 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg), &u32Attr); AssertRC(rc);
3233 pSelReg->Attr.u = u32Attr;
3234 if (u32Attr & X86DESCATTR_UNUSABLE)
3235 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + a_iSegReg * 3);
3236
3237 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3238}
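/*
 * Note on the logging expression above (just an explanation of the existing trick):
 * "ES\0CS\0SS\0DS\0FS\0GS" + a_iSegReg * 3 works because each register name is two characters
 * plus its NUL terminator, so e.g. a_iSegReg == X86_SREG_SS (2) points at the embedded "SS"
 * string. The offsets assume the X86_SREG_XXX values 0..5 in the usual ES/CS/SS/DS/FS/GS order.
 */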
3239
3240
3241/**
3242 * Imports the guest LDTR from the VMCS into the guest-CPU context.
3243 *
3244 * @param pVCpu The cross context virtual CPU structure.
3245 *
3246 * @remarks Called with interrupts and/or preemption disabled.
3247 */
3248DECL_FORCE_INLINE(void) vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3249{
3250 uint16_t u16Sel;
3251 uint64_t u64Base;
3252 uint32_t u32Limit, u32Attr;
3253 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3254 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3255 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3256 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3257
3258 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3259 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3260 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3261 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3262 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3263 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3264 if (u32Attr & X86DESCATTR_UNUSABLE)
3265 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3266}
3267
3268
3269/**
3270 * Imports the guest TR from the VMCS into the guest-CPU context.
3271 *
3272 * @param pVCpu The cross context virtual CPU structure.
3273 *
3274 * @remarks Called with interrupts and/or preemption disabled.
3275 */
3276DECL_FORCE_INLINE(void) vmxHCImportGuestTr(PVMCPUCC pVCpu)
3277{
3278 uint16_t u16Sel;
3279 uint64_t u64Base;
3280 uint32_t u32Limit, u32Attr;
3281 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3282 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3283 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3284 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3285
3286 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3287 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3288 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3289 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3290 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3291 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3292 /* TR is the only selector that can never be unusable. */
3293 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3294}
3295
3296
3297/**
3298 * Core: Imports the guest RIP from the VMCS into the guest-CPU context.
3299 *
3300 * @returns The RIP value.
3301 * @param pVCpu The cross context virtual CPU structure.
3302 *
3303 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3304 * @remarks Do -not- call this function directly!
3305 */
3306DECL_FORCE_INLINE(uint64_t) vmxHCImportGuestCoreRip(PVMCPUCC pVCpu)
3307{
3308 uint64_t u64Val;
3309 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3310 AssertRC(rc);
3311
3312 pVCpu->cpum.GstCtx.rip = u64Val;
3313
3314 return u64Val;
3315}
3316
3317
3318/**
3319 * Imports the guest RIP from the VMCS into the guest-CPU context.
3320 *
3321 * @param pVCpu The cross context virtual CPU structure.
3322 *
3323 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3324 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3325 * instead!!!
3326 */
3327DECL_FORCE_INLINE(void) vmxHCImportGuestRip(PVMCPUCC pVCpu)
3328{
3329 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
3330 {
3331 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3332 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3333 }
3334}
3335
3336
3337/**
3338 * Core: Imports the guest RFLAGS from the VMCS into the guest-CPU context.
3339 *
3340 * @param pVCpu The cross context virtual CPU structure.
3341 * @param pVmcsInfo The VMCS info. object.
3342 *
3343 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3344 * @remarks Do -not- call this function directly!
3345 */
3346DECL_FORCE_INLINE(void) vmxHCImportGuestCoreRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3347{
3348 uint64_t fRFlags;
3349 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &fRFlags);
3350 AssertRC(rc);
3351
3352 Assert((fRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
3353 Assert((fRFlags & ~(uint64_t)(X86_EFL_1 | X86_EFL_LIVE_MASK)) == 0);
3354
3355 pVCpu->cpum.GstCtx.rflags.u = fRFlags;
3356#ifndef IN_NEM_DARWIN
3357 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3358 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
3359    { /* most likely */ }
3360 else
3361 {
3362 pVCpu->cpum.GstCtx.eflags.Bits.u1VM = 0;
3363 pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3364 }
3365#else
3366 RT_NOREF(pVmcsInfo);
3367#endif
3368}
3369
3370
3371/**
3372 * Imports the guest RFLAGS from the VMCS into the guest-CPU context.
3373 *
3374 * @param pVCpu The cross context virtual CPU structure.
3375 * @param pVmcsInfo The VMCS info. object.
3376 *
3377 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3378 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3379 * instead!!!
3380 */
3381DECL_FORCE_INLINE(void) vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3382{
3383 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RFLAGS)
3384 {
3385 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3386 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3387 }
3388}
3389
3390
3391#ifndef IN_NEM_DARWIN
3392/**
3393 * Imports the guest TSC_AUX and certain other MSRs from the VMCS into the guest-CPU
3394 * context.
3395 *
3396 * The other MSRs are in the VM-exit MSR-store.
3397 *
3398 * @returns VBox status code.
3399 * @param pVCpu The cross context virtual CPU structure.
3400 * @param pVmcsInfo The VMCS info. object.
3401 * @param fEFlags Saved EFLAGS for restoring the interrupt flag (in case of
3402 * unexpected errors). Ignored in NEM/darwin context.
3403 */
3404DECL_FORCE_INLINE(int) vmxHCImportGuestTscAuxAndOtherMsrs(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3405{
3406 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3407 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3408 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3409 Assert(pMsrs);
3410 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3411 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3412 for (uint32_t i = 0; i < cMsrs; i++)
3413 {
3414 uint32_t const idMsr = pMsrs[i].u32Msr;
3415 switch (idMsr)
3416 {
3417 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3418 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3419 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3420 default:
3421 {
3422 uint32_t idxLbrMsr;
3423 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3424 if (VM_IS_VMX_LBR(pVM))
3425 {
3426 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3427 {
3428 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3429 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3430 break;
3431 }
3432 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3433 {
3434                        Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3435 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3436 break;
3437 }
3438 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3439 {
3440 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3441 break;
3442 }
3443 /* Fallthru (no break) */
3444 }
3445 pVCpu->cpum.GstCtx.fExtrn = 0;
3446 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3447 ASMSetFlags(fEFlags);
3448 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3449 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3450 }
3451 }
3452 }
3453 return VINF_SUCCESS;
3454}
3455#endif /* !IN_NEM_DARWIN */
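/*
 * Sketch of the MSR-store entry format walked above (inferred from the VMXAUTOMSR usage here;
 * see its definition for the authoritative layout):
 *
 *     MSR index   (bits  31:0)   -> pMsrs[i].u32Msr
 *     reserved    (bits  63:32)
 *     MSR value   (bits 127:64)  -> pMsrs[i].u64Value
 *
 * i.e. one 16-byte entry per MSR the CPU saved into the VM-exit MSR-store area.
 */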
3456
3457
3458/**
3459 * Imports the guest CR0 from the VMCS into the guest-CPU context.
3460 *
3461 * @param pVCpu The cross context virtual CPU structure.
3462 * @param pVmcsInfo The VMCS info. object.
3463 */
3464DECL_FORCE_INLINE(void) vmxHCImportGuestCr0(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3465{
3466 uint64_t u64Cr0;
3467 uint64_t u64Shadow;
3468 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3469 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3470#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3471 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3472 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3473#else
3474 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
3475 {
3476 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3477 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3478 }
3479 else
3480 {
3481 /*
3482 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3483 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3484 * re-construct CR0. See @bugref{9180#c95} for details.
3485 */
3486 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3487 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3488 u64Cr0 = (u64Cr0 & ~(pVmcsInfoGst->u64Cr0Mask & pVmcsNstGst->u64Cr0Mask.u))
3489 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3490 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3491 Assert(u64Cr0 & X86_CR0_NE);
3492 }
3493#endif
3494
3495#ifndef IN_NEM_DARWIN
3496 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3497#endif
3498 CPUMSetGuestCR0(pVCpu, u64Cr0);
3499#ifndef IN_NEM_DARWIN
3500 VMMRZCallRing3Enable(pVCpu);
3501#endif
3502}
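/*
 * Worked example of the read-shadow reconstruction above (the mask choice is purely
 * illustrative): with a guest/host mask of X86_CR0_CD | X86_CR0_NW, a hardware CR0 that has
 * both bits clear and a read shadow with CD set, the expression
 * (u64Cr0 & ~mask) | (u64Shadow & mask) yields a guest-visible CR0 with CD set and NW clear --
 * bits owned by the mask come from the shadow, everything else from the real CR0.
 */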
3503
3504
3505/**
3506 * Imports the guest CR3 from the VMCS into the guest-CPU context.
3507 *
3508 * @param pVCpu The cross context virtual CPU structure.
3509 */
3510DECL_FORCE_INLINE(void) vmxHCImportGuestCr3(PVMCPUCC pVCpu)
3511{
3512 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3513 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3514
3515 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3516 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3517 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3518 && CPUMIsGuestPagingEnabledEx(pCtx)))
3519 {
3520 uint64_t u64Cr3;
3521 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3522 if (pCtx->cr3 != u64Cr3)
3523 {
3524 pCtx->cr3 = u64Cr3;
3525 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3526 }
3527
3528 /*
3529 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3530 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3531 */
3532 if (CPUMIsGuestInPAEModeEx(pCtx))
3533 {
3534 X86PDPE aPaePdpes[4];
3535 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3536 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3537 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3538 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3539 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3540 {
3541 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3542 /* PGM now updates PAE PDPTEs while updating CR3. */
3543 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3544 }
3545 }
3546 }
3547}
3548
3549
3550/**
3551 * Imports the guest CR4 from the VMCS into the guest-CPU context.
3552 *
3553 * @param pVCpu The cross context virtual CPU structure.
3554 * @param pVmcsInfo The VMCS info. object.
3555 */
3556DECL_FORCE_INLINE(void) vmxHCImportGuestCr4(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3557{
3558 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3559 uint64_t u64Cr4;
3560 uint64_t u64Shadow;
3561 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3562 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3563#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3564 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3565 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3566#else
3567 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3568 {
3569 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3570 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3571 }
3572 else
3573 {
3574 /*
3575 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3576 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3577 * re-construct CR4. See @bugref{9180#c95} for details.
3578 */
3579 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3580 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3581 u64Cr4 = (u64Cr4 & ~(pVmcsInfo->u64Cr4Mask & pVmcsNstGst->u64Cr4Mask.u))
3582 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3583 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3584 Assert(u64Cr4 & X86_CR4_VMXE);
3585 }
3586#endif
3587 pCtx->cr4 = u64Cr4;
3588}
3589
3590
3591/**
3592 * Worker for vmxHCImportGuestIntrState that handles the case where any of the
3593 * relevant VMX_VMCS32_GUEST_INT_STATE bits are set.
3594 */
3595DECL_NO_INLINE(static,void) vmxHCImportGuestIntrStateSlow(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fGstIntState)
3596{
3597 /*
3598 * We must import RIP here to set our EM interrupt-inhibited state.
3599 * We also import RFLAGS as our code that evaluates pending interrupts
3600 * before VM-entry requires it.
3601 */
3602 vmxHCImportGuestRip(pVCpu);
3603 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3604
3605 CPUMUpdateInterruptShadowSsStiEx(&pVCpu->cpum.GstCtx,
3606 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
3607 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
3608 pVCpu->cpum.GstCtx.rip);
3609 CPUMUpdateInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx, RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
3610}
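/*
 * Illustrative guest sequence for the interrupt-shadow bits handled above (example code, not
 * from any particular guest):
 *
 *     mov  ss, ax     ; or: sti
 *     mov  esp, ebp   ; external interrupts are still inhibited while this executes
 *
 * This is why RIP is imported first: CPUM records the shadow together with the instruction
 * boundary (RIP) it applies to.
 */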
3611
3612
3613/**
3614 * Imports the guest interruptibility-state from the VMCS into the guest-CPU
3615 * context.
3616 *
3617 * @note May import RIP and RFLAGS if interrupt or NMI are blocked.
3618 *
3619 * @param pVCpu The cross context virtual CPU structure.
3620 * @param pVmcsInfo The VMCS info. object.
3621 *
3622 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3623 * do not log!
3624 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3625 * instead!!!
3626 */
3627DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3628{
3629 uint32_t u32Val;
3630 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3631 if (!u32Val)
3632 {
3633 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
3634 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
3635 }
3636 else
3637 vmxHCImportGuestIntrStateSlow(pVCpu, pVmcsInfo, u32Val);
3638}
3639
3640
3641/**
3642 * Worker for VMXR0ImportStateOnDemand.
3643 *
3644 * @returns VBox status code.
3645 * @param pVCpu The cross context virtual CPU structure.
3646 * @param pVmcsInfo The VMCS info. object.
3647 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3648 */
3649static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3650{
3651 int rc = VINF_SUCCESS;
3652 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3653 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3654 uint32_t u32Val;
3655
3656 /*
3657     * Note! This is a hack to work around a mysterious BSOD observed with release builds
3658 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3659 * neither are other host platforms.
3660 *
3661 * Committing this temporarily as it prevents BSOD.
3662 *
3663 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3664 */
3665#ifdef RT_OS_WINDOWS
3666 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3667 return VERR_HM_IPE_1;
3668#endif
3669
3670 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3671
3672#ifndef IN_NEM_DARWIN
3673 /*
3674 * We disable interrupts to make the updating of the state and in particular
3675 * the fExtrn modification atomic wrt to preemption hooks.
3676 */
3677 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3678#endif
3679
3680 fWhat &= pCtx->fExtrn;
3681 if (fWhat)
3682 {
3683 do
3684 {
3685 if (fWhat & CPUMCTX_EXTRN_RIP)
3686 vmxHCImportGuestRip(pVCpu);
3687
3688 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3689 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3690
3691 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3692 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3693 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3694
3695 if (fWhat & CPUMCTX_EXTRN_RSP)
3696 {
3697 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3698 AssertRC(rc);
3699 }
3700
3701 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3702 {
3703 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3704#ifndef IN_NEM_DARWIN
3705 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3706#else
3707 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3708#endif
3709 if (fWhat & CPUMCTX_EXTRN_CS)
3710 {
3711 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3712 vmxHCImportGuestRip(pVCpu); /** @todo WTF? */
3713 if (fRealOnV86Active)
3714 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3715 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3716 }
3717 if (fWhat & CPUMCTX_EXTRN_SS)
3718 {
3719 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3720 if (fRealOnV86Active)
3721 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3722 }
3723 if (fWhat & CPUMCTX_EXTRN_DS)
3724 {
3725 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3726 if (fRealOnV86Active)
3727 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3728 }
3729 if (fWhat & CPUMCTX_EXTRN_ES)
3730 {
3731 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3732 if (fRealOnV86Active)
3733 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3734 }
3735 if (fWhat & CPUMCTX_EXTRN_FS)
3736 {
3737 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3738 if (fRealOnV86Active)
3739 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3740 }
3741 if (fWhat & CPUMCTX_EXTRN_GS)
3742 {
3743 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3744 if (fRealOnV86Active)
3745 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3746 }
3747 }
3748
3749 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3750 {
3751 if (fWhat & CPUMCTX_EXTRN_LDTR)
3752 vmxHCImportGuestLdtr(pVCpu);
3753
3754 if (fWhat & CPUMCTX_EXTRN_GDTR)
3755 {
3756 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3757 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3758 pCtx->gdtr.cbGdt = u32Val;
3759 }
3760
3761 /* Guest IDTR. */
3762 if (fWhat & CPUMCTX_EXTRN_IDTR)
3763 {
3764 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3765 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3766 pCtx->idtr.cbIdt = u32Val;
3767 }
3768
3769 /* Guest TR. */
3770 if (fWhat & CPUMCTX_EXTRN_TR)
3771 {
3772#ifndef IN_NEM_DARWIN
3773 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3774 don't need to import that one. */
3775 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3776#endif
3777 vmxHCImportGuestTr(pVCpu);
3778 }
3779 }
3780
3781 if (fWhat & CPUMCTX_EXTRN_DR7)
3782 {
3783#ifndef IN_NEM_DARWIN
3784 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3785#endif
3786 {
3787 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3788 AssertRC(rc);
3789 }
3790 }
3791
3792 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3793 {
3794 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3795 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3796 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3797 pCtx->SysEnter.cs = u32Val;
3798 }
3799
3800#ifndef IN_NEM_DARWIN
3801 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3802 {
3803 if ( pVM->hmr0.s.fAllow64BitGuests
3804 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3805 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3806 }
3807
3808 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3809 {
3810 if ( pVM->hmr0.s.fAllow64BitGuests
3811 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3812 {
3813 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3814 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3815 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3816 }
3817 }
3818
3819 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3820 {
3821 rc = vmxHCImportGuestTscAuxAndOtherMsrs(pVCpu, pVmcsInfo, fEFlags);
3822 AssertRCReturn(rc, rc);
3823 }
3824#else
3825 NOREF(pVM);
3826#endif
3827
3828 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3829 {
3830 if (fWhat & CPUMCTX_EXTRN_CR0)
3831 vmxHCImportGuestCr0(pVCpu, pVmcsInfo);
3832
3833 if (fWhat & CPUMCTX_EXTRN_CR4)
3834 vmxHCImportGuestCr4(pVCpu, pVmcsInfo);
3835
3836 if (fWhat & CPUMCTX_EXTRN_CR3)
3837 vmxHCImportGuestCr3(pVCpu);
3838 }
3839
3840#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3841 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3842 {
3843 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3844 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3845 {
3846 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3847 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3848 if (RT_SUCCESS(rc))
3849 { /* likely */ }
3850 else
3851 break;
3852 }
3853 }
3854#endif
3855 } while (0);
3856
3857 if (RT_SUCCESS(rc))
3858 {
3859 /* Update fExtrn. */
3860 pCtx->fExtrn &= ~fWhat;
3861
3862 /* If everything has been imported, clear the HM keeper bit. */
3863 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3864 {
3865#ifndef IN_NEM_DARWIN
3866 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3867#else
3868 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3869#endif
3870 Assert(!pCtx->fExtrn);
3871 }
3872 }
3873 }
3874#ifndef IN_NEM_DARWIN
3875 else
3876 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3877
3878 /*
3879 * Restore interrupts.
3880 */
3881 ASMSetFlags(fEFlags);
3882#endif
3883
3884 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3885
3886 if (RT_SUCCESS(rc))
3887 { /* likely */ }
3888 else
3889 return rc;
3890
3891 /*
3892 * Honor any pending CR3 updates.
3893 *
3894 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3895 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3896 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3897 *
3898 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3899 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3900 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3901 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3902 *
3903 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3904 *
3905 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3906 */
3907 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3908#ifndef IN_NEM_DARWIN
3909 && VMMRZCallRing3IsEnabled(pVCpu)
3910#endif
3911 )
3912 {
3913 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3914 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3915 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3916 }
3917
3918 return VINF_SUCCESS;
3919}
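/*
 * Minimal sketch of the fExtrn protocol the function above implements (simplified and
 * illustrative, assuming the caller runs with the usual preemption protection):
 *
 *     if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
 *         ... import it first (e.g. via vmxHCImportGuestState) ...
 *     uint64_t const uGuestRip = pVCpu->cpum.GstCtx.rip;   // now safe to use
 *
 * Bits set in fExtrn mean the corresponding register still lives only in the VMCS.
 */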
3920
3921
3922/**
3923 * Internal state fetcher, inner version where we fetch all of a_fWhat.
3924 *
3925 * @returns VBox status code.
3926 * @param pVCpu The cross context virtual CPU structure.
3927 * @param pVmcsInfo The VMCS info. object.
3928 * @param fEFlags Saved EFLAGS for restoring the interrupt flag. Ignored
3929 * in NEM/darwin context.
3930 * @tparam a_fWhat What to import, zero or more bits from
3931 * HMVMX_CPUMCTX_EXTRN_ALL.
3932 */
3933template<uint64_t const a_fWhat>
3934static int vmxHCImportGuestStateInner(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3935{
3936 Assert(a_fWhat != 0); /* No AssertCompile as the assertion probably kicks in before the compiler (clang) discards it. */
3937 AssertCompile(!(a_fWhat & ~HMVMX_CPUMCTX_EXTRN_ALL));
3938 Assert( (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == a_fWhat
3939 || (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == (a_fWhat & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
3940
3941 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3942
3943 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3944
3945    /* RIP and RFLAGS may have been imported already by the post exit code
3946       together with the CPUMCTX_EXTRN_INHIBIT_INT/NMI state, in which case
3947       there is nothing left for this part of the code to fetch. */
3948 if ( (a_fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3949 && pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3950 {
3951 if (a_fWhat & CPUMCTX_EXTRN_RFLAGS)
3952 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3953
3954 if (a_fWhat & CPUMCTX_EXTRN_RIP)
3955 {
3956 if (!(a_fWhat & CPUMCTX_EXTRN_CS))
3957 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3958 else
3959 vmxHCImportGuestCoreRip(pVCpu);
3960 }
3961 }
3962
3963 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3964 if (a_fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3965 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3966
3967 if (a_fWhat & (CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TR))
3968 {
3969 if (a_fWhat & CPUMCTX_EXTRN_CS)
3970 {
3971 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3972            /** @todo try to get rid of this crap, it smells and is probably never ever
3973             * used: */
3974 if ( !(a_fWhat & CPUMCTX_EXTRN_RIP)
3975 && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP))
3976 {
3977 vmxHCImportGuestCoreRip(pVCpu);
3978 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3979 }
3980 EMHistoryUpdatePC(pVCpu, pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, true /* fFlattened */);
3981 }
3982 if (a_fWhat & CPUMCTX_EXTRN_SS)
3983 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3984 if (a_fWhat & CPUMCTX_EXTRN_DS)
3985 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3986 if (a_fWhat & CPUMCTX_EXTRN_ES)
3987 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3988 if (a_fWhat & CPUMCTX_EXTRN_FS)
3989 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3990 if (a_fWhat & CPUMCTX_EXTRN_GS)
3991 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3992
3993 /* Guest TR.
3994 Real-mode emulation using virtual-8086 mode has the fake TSS
3995 (pRealModeTSS) in TR, don't need to import that one. */
3996#ifndef IN_NEM_DARWIN
3997 PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmcsInfo->pShared;
3998 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3999 if ((a_fWhat & CPUMCTX_EXTRN_TR) && !fRealOnV86Active)
4000#else
4001 if (a_fWhat & CPUMCTX_EXTRN_TR)
4002#endif
4003 vmxHCImportGuestTr(pVCpu);
4004
4005#ifndef IN_NEM_DARWIN /* NEM/Darwin: HV supports only unrestricted guest execution. */
4006 if (fRealOnV86Active)
4007 {
4008 if (a_fWhat & CPUMCTX_EXTRN_CS)
4009 pVCpu->cpum.GstCtx.cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
4010 if (a_fWhat & CPUMCTX_EXTRN_SS)
4011 pVCpu->cpum.GstCtx.ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
4012 if (a_fWhat & CPUMCTX_EXTRN_DS)
4013 pVCpu->cpum.GstCtx.ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
4014 if (a_fWhat & CPUMCTX_EXTRN_ES)
4015 pVCpu->cpum.GstCtx.es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
4016 if (a_fWhat & CPUMCTX_EXTRN_FS)
4017 pVCpu->cpum.GstCtx.fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
4018 if (a_fWhat & CPUMCTX_EXTRN_GS)
4019 pVCpu->cpum.GstCtx.gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
4020 }
4021#endif
4022 }
4023
4024 if (a_fWhat & CPUMCTX_EXTRN_RSP)
4025 {
4026 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pVCpu->cpum.GstCtx.rsp);
4027 AssertRC(rc);
4028 }
4029
4030 if (a_fWhat & CPUMCTX_EXTRN_LDTR)
4031 vmxHCImportGuestLdtr(pVCpu);
4032
4033 if (a_fWhat & CPUMCTX_EXTRN_GDTR)
4034 {
4035 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pVCpu->cpum.GstCtx.gdtr.pGdt); AssertRC(rc1);
4036 uint32_t u32Val;
4037 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc2);
4038 pVCpu->cpum.GstCtx.gdtr.cbGdt = (uint16_t)u32Val;
4039 }
4040
4041 /* Guest IDTR. */
4042 if (a_fWhat & CPUMCTX_EXTRN_IDTR)
4043 {
4044 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pVCpu->cpum.GstCtx.idtr.pIdt); AssertRC(rc1);
4045 uint32_t u32Val;
4046 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc2);
4047        pVCpu->cpum.GstCtx.idtr.cbIdt = (uint16_t)u32Val;
4048 }
4049
4050 if (a_fWhat & CPUMCTX_EXTRN_DR7)
4051 {
4052#ifndef IN_NEM_DARWIN
4053 if (!pVCpu->hmr0.s.fUsingHyperDR7)
4054#endif
4055 {
4056 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pVCpu->cpum.GstCtx.dr[7]);
4057 AssertRC(rc);
4058 }
4059 }
4060
4061 if (a_fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
4062 {
4063 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pVCpu->cpum.GstCtx.SysEnter.eip); AssertRC(rc1);
4064 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pVCpu->cpum.GstCtx.SysEnter.esp); AssertRC(rc2);
4065 uint32_t u32Val;
4066 int const rc3 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc3);
4067 pVCpu->cpum.GstCtx.SysEnter.cs = u32Val;
4068 }
4069
4070#ifndef IN_NEM_DARWIN
4071 if (a_fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
4072 {
4073 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4074 && pVM->hmr0.s.fAllow64BitGuests)
4075 pVCpu->cpum.GstCtx.msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
4076 }
4077
4078 if (a_fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
4079 {
4080 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4081 && pVM->hmr0.s.fAllow64BitGuests)
4082 {
4083 pVCpu->cpum.GstCtx.msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
4084 pVCpu->cpum.GstCtx.msrSTAR = ASMRdMsr(MSR_K6_STAR);
4085 pVCpu->cpum.GstCtx.msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
4086 }
4087 }
4088
4089 if (a_fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
4090 {
4091 int const rc1 = vmxHCImportGuestTscAuxAndOtherMsrs(pVCpu, pVmcsInfo, fEFlags);
4092 AssertRCReturn(rc1, rc1);
4093 }
4094#endif
4095
4096 if (a_fWhat & CPUMCTX_EXTRN_CR0)
4097 vmxHCImportGuestCr0(pVCpu, pVmcsInfo);
4098
4099 if (a_fWhat & CPUMCTX_EXTRN_CR4)
4100 vmxHCImportGuestCr4(pVCpu, pVmcsInfo);
4101
4102 if (a_fWhat & CPUMCTX_EXTRN_CR3)
4103 vmxHCImportGuestCr3(pVCpu);
4104
4105#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4106 if (a_fWhat & CPUMCTX_EXTRN_HWVIRT)
4107 {
4108 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4109 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4110 {
4111 Assert(CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx));
4112 int const rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
4113 AssertRCReturn(rc, rc);
4114 }
4115 }
4116#endif
4117
4118 /* Update fExtrn. */
4119 pVCpu->cpum.GstCtx.fExtrn &= ~a_fWhat;
4120
4121 /* If everything has been imported, clear the HM keeper bit. */
4122 if (!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
4123 {
4124#ifndef IN_NEM_DARWIN
4125 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
4126#else
4127 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
4128#endif
4129 Assert(!pVCpu->cpum.GstCtx.fExtrn);
4130 }
4131
4132 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
4133
4134 /*
4135 * Honor any pending CR3 updates.
4136 *
4137 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
4138 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
4139 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
4140 *
4141 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
4142 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
4143 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
4144 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
4145 *
4146 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
4147 *
4148 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
4149 */
4150#ifndef IN_NEM_DARWIN
4151 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4152 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu))
4153 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu) )
4154 return VINF_SUCCESS;
4155 ASMSetFlags(fEFlags);
4156#else
4157 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4158 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
4159 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) )
4160 return VINF_SUCCESS;
4161 RT_NOREF_PV(fEFlags);
4162#endif
4163
4164 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
4165 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
4166 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
4167 return VINF_SUCCESS;
4168}
4169
4170
4171/**
4172 * Internal state fetcher.
4173 *
4174 * @returns VBox status code.
4175 * @param pVCpu The cross context virtual CPU structure.
4176 * @param pVmcsInfo The VMCS info. object.
4177 * @param pszCaller For logging.
4178 * @tparam a_fWhat What needs to be imported, CPUMCTX_EXTRN_XXX.
4179 * @tparam a_fDoneLocal What's ASSUMED to have been retrieved locally
4180 * already. This is ORed together with @a a_fWhat when
4181 * calculating what needs fetching (just for safety).
4182 * @tparam  a_fDonePostExit     What's ASSUMED to have been retrieved by
4183 * hmR0VmxPostRunGuest()/nemR3DarwinHandleExitCommon()
4184 * already. This is ORed together with @a a_fWhat when
4185 * calculating what needs fetching (just for safety).
4186 */
4187template<uint64_t const a_fWhat,
4188 uint64_t const a_fDoneLocal = 0,
4189 uint64_t const a_fDonePostExit = 0
4190#ifndef IN_NEM_DARWIN
4191 | CPUMCTX_EXTRN_INHIBIT_INT
4192 | CPUMCTX_EXTRN_INHIBIT_NMI
4193# if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
4194 | HMVMX_CPUMCTX_EXTRN_ALL
4195# elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
4196 | CPUMCTX_EXTRN_RFLAGS
4197# endif
4198#else /* IN_NEM_DARWIN */
4199 | CPUMCTX_EXTRN_ALL /** @todo optimize */
4200#endif /* IN_NEM_DARWIN */
4201>
4202DECLINLINE(int) vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, const char *pszCaller)
4203{
4204 RT_NOREF_PV(pszCaller);
4205 if ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL)
4206 {
4207#ifndef IN_NEM_DARWIN
4208 /*
4209 * We disable interrupts to make the updating of the state and in particular
4210 * the fExtrn modification atomic wrt to preemption hooks.
4211 */
4212 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
4213#else
4214 RTCCUINTREG const fEFlags = 0;
4215#endif
4216
4217 /*
4218 * We combine all three parameters and take the (probably) inlined optimized
4219 * code path for the new things specified in a_fWhat.
4220 *
4221 * As a tweak to deal with exits that have INHIBIT_INT/NMI active, causing
4222 * vmxHCImportGuestIntrState to automatically fetch both RIP & RFLAGS, we
4223 * also take the streamlined path when both of these are cleared in fExtrn
4224 * already. vmxHCImportGuestStateInner checks fExtrn before fetching. This
4225 * helps with MWAIT and HLT exits that always inhibit IRQs on many platforms.
4226 */
4227 uint64_t const fWhatToDo = pVCpu->cpum.GstCtx.fExtrn
4228 & ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL);
4229 if (RT_LIKELY( ( fWhatToDo == (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit))
4230 || fWhatToDo == ( a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)
4231 & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)) /* fetch with INHIBIT_INT/NMI */))
4232 && (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)) != 0 /* just in case */)
4233 {
4234 int const rc = vmxHCImportGuestStateInner< a_fWhat
4235 & HMVMX_CPUMCTX_EXTRN_ALL
4236 & ~(a_fDoneLocal | a_fDonePostExit)>(pVCpu, pVmcsInfo, fEFlags);
4237#ifndef IN_NEM_DARWIN
4238 ASMSetFlags(fEFlags);
4239#endif
4240 return rc;
4241 }
4242
4243#ifndef IN_NEM_DARWIN
4244 ASMSetFlags(fEFlags);
4245#endif
4246
4247 /*
4248 * We shouldn't normally get here, but it may happen when executing
4249 * in the debug run-loops. Typically, everything should already have
4250 * been fetched then. Otherwise call the fallback state import function.
4251 */
4252 if (fWhatToDo == 0)
4253 { /* hope the cause was the debug loop or something similar */ }
4254 else
4255 {
4256 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestStateFallback);
4257 Log11Func(("a_fWhat=%#RX64/%#RX64/%#RX64 fExtrn=%#RX64 => %#RX64 - Taking inefficient code path from %s!\n",
4258 a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL, a_fDoneLocal & HMVMX_CPUMCTX_EXTRN_ALL,
4259 a_fDonePostExit & HMVMX_CPUMCTX_EXTRN_ALL, pVCpu->cpum.GstCtx.fExtrn, fWhatToDo, pszCaller));
4260 return vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, a_fWhat | a_fDoneLocal | a_fDonePostExit);
4261 }
4262 }
4263 return VINF_SUCCESS;
4264}
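/*
 * Rough instantiation sketch for the template above (illustrative; real exit handlers pick
 * their own masks and caller strings):
 *
 *     int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS>(pVCpu, pVmcsInfo, __FUNCTION__);
 *     AssertRCReturn(rc, rc);
 *
 * a_fDoneLocal and a_fDonePostExit are left at their defaults here.
 */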
4265
4266
4267/**
4268 * Check per-VM and per-VCPU force flag actions that require us to go back to
4269 * ring-3 for one reason or another.
4270 *
4271 * @returns Strict VBox status code (i.e. informational status codes too)
4272 * @retval VINF_SUCCESS if we don't have any actions that require going back to
4273 * ring-3.
4274 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
4275 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
4276 * interrupts)
4277 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
4278 * all EMTs to be in ring-3.
4279 * @retval VINF_EM_RAW_TO_R3 if there is pending DMA requests.
4280 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
4281 * to the EM loop.
4282 *
4283 * @param pVCpu The cross context virtual CPU structure.
4284 * @param   pVCpu           The cross context virtual CPU structure.
4285 * @param   fIsNestedGuest  Flag whether this is for a pending nested-guest event.
4285 * @param fStepping Whether we are single-stepping the guest using the
4286 * hypervisor debugger.
4287 *
4288 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
4289 * is no longer in VMX non-root mode.
4290 */
4291static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
4292{
4293#ifndef IN_NEM_DARWIN
4294 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4295#endif
4296
4297 /*
4298 * Update pending interrupts into the APIC's IRR.
4299 */
4300 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4301 APICUpdatePendingInterrupts(pVCpu);
4302
4303 /*
4304 * Anything pending? Should be more likely than not if we're doing a good job.
4305 */
4306 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4307 if ( !fStepping
4308 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
4309 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
4310 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
4311 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4312 return VINF_SUCCESS;
4313
4314    /* Pending PGM CR3 sync. */
4315 if (VMCPU_FF_IS_ANY_SET(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
4316 {
4317 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4318 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
4319 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
4320 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
4321 if (rcStrict != VINF_SUCCESS)
4322 {
4323 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
4324 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
4325 return rcStrict;
4326 }
4327 }
4328
4329 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
4330 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
4331 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4332 {
4333 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
4334 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
4335 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
4336 rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
4337 return rc;
4338 }
4339
4340 /* Pending VM request packets, such as hardware interrupts. */
4341 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
4342 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
4343 {
4344 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
4345 Log4Func(("Pending VM request forcing us back to ring-3\n"));
4346 return VINF_EM_PENDING_REQUEST;
4347 }
4348
4349 /* Pending PGM pool flushes. */
4350 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
4351 {
4352 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
4353 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
4354 return VINF_PGM_POOL_FLUSH_PENDING;
4355 }
4356
4357 /* Pending DMA requests. */
4358 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4359 {
4360 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
4361 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4362 return VINF_EM_RAW_TO_R3;
4363 }
4364
4365#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4366 /*
4367 * Pending nested-guest events.
4368 *
4369     * Please note that the priority of these events is specified and important.
4370 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4371 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
4372 */
4373 if (fIsNestedGuest)
4374 {
4375 /* Pending nested-guest APIC-write. */
4376 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4377 {
4378 Log4Func(("Pending nested-guest APIC-write\n"));
4379 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
4380 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4381 return rcStrict;
4382 }
4383
4384 /* Pending nested-guest monitor-trap flag (MTF). */
4385 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
4386 {
4387 Log4Func(("Pending nested-guest MTF\n"));
4388 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
4389 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4390 return rcStrict;
4391 }
4392
4393 /* Pending nested-guest VMX-preemption timer expired. */
4394 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
4395 {
4396 Log4Func(("Pending nested-guest preempt timer\n"));
4397 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
4398 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4399 return rcStrict;
4400 }
4401 }
4402#else
4403 NOREF(fIsNestedGuest);
4404#endif
4405
4406 return VINF_SUCCESS;
4407}
4408
4409
4410/**
4411 * Converts any TRPM trap into a pending HM event. This is typically used when
4412 * entering from ring-3 (not longjmp returns).
4413 *
4414 * @param pVCpu The cross context virtual CPU structure.
4415 */
4416static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
4417{
4418 Assert(TRPMHasTrap(pVCpu));
4419 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4420
4421 uint8_t uVector;
4422 TRPMEVENT enmTrpmEvent;
4423 uint32_t uErrCode;
4424 RTGCUINTPTR GCPtrFaultAddress;
4425 uint8_t cbInstr;
4426 bool fIcebp;
4427
4428 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4429 AssertRC(rc);
4430
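    /*
     * Assemble the event in the VMX interruption-information format: the vector goes in
     * bits 7:0, the event type in bits 10:8 and the valid flag in bit 31 (e.g. vector 0x0e
     * with type 'hardware exception' (3) and the valid bit encodes as 0x8000030e). The
     * IDT-vectoring and VM-entry interruption-information formats are layout compatible
     * for these fields.
     */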
4431 uint32_t u32IntInfo;
4432 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4433 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4434
4435 rc = TRPMResetTrap(pVCpu);
4436 AssertRC(rc);
4437 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4438 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4439
4440 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4441}
4442
4443
4444/**
4445 * Converts the pending HM event into a TRPM trap.
4446 *
4447 * @param pVCpu The cross context virtual CPU structure.
4448 */
4449static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4450{
4451 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4452
4453 /* If a trap was already pending, we did something wrong! */
4454 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4455
4456 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4457 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4458 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4459
4460 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4461
4462 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4463 AssertRC(rc);
4464
4465 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4466 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4467
4468 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4469 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4470 else
4471 {
4472 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4473 switch (uVectorType)
4474 {
4475 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4476 TRPMSetTrapDueToIcebp(pVCpu);
4477 RT_FALL_THRU();
4478 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4479 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4480 {
4481 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4482 || ( uVector == X86_XCPT_BP /* INT3 */
4483 || uVector == X86_XCPT_OF /* INTO */
4484 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4485 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4486 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4487 break;
4488 }
4489 }
4490 }
4491
4492 /* We're now done converting the pending event. */
4493 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4494}
4495
4496
4497/**
4498 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4499 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4500 *
4501 * @param pVCpu The cross context virtual CPU structure.
4502 * @param pVmcsInfo The VMCS info. object.
4503 */
4504static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4505{
4506 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4507 {
4508 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4509 {
4510 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4511 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4512 AssertRC(rc);
4513 }
4514 } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4515}
4516
4517
4518/**
4519 * Clears the interrupt-window exiting control in the VMCS.
4520 *
4521 * @param pVCpu The cross context virtual CPU structure.
4522 * @param pVmcsInfo The VMCS info. object.
4523 */
4524DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4525{
4526 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4527 {
4528 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4529 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4530 AssertRC(rc);
4531 }
4532}
4533
4534
4535/**
4536 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4537 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4538 *
4539 * @param pVCpu The cross context virtual CPU structure.
4540 * @param pVmcsInfo The VMCS info. object.
4541 */
4542static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4543{
4544 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4545 {
4546 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4547 {
4548 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4549 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4550 AssertRC(rc);
4551 Log4Func(("Setup NMI-window exiting\n"));
4552 }
4553 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4554}
4555
4556
4557/**
4558 * Clears the NMI-window exiting control in the VMCS.
4559 *
4560 * @param pVCpu The cross context virtual CPU structure.
4561 * @param pVmcsInfo The VMCS info. object.
4562 */
4563DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4564{
4565 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4566 {
4567 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4568 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4569 AssertRC(rc);
4570 }
4571}
4572
4573
4574/**
4575 * Injects an event into the guest upon VM-entry by updating the relevant fields
4576 * in the VM-entry area in the VMCS.
4577 *
4578 * @returns Strict VBox status code (i.e. informational status codes too).
4579 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4580 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4581 *
4582 * @param pVCpu The cross context virtual CPU structure.
4583 * @param pVmcsInfo The VMCS info object.
4584 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
4585 * @param pEvent The event being injected.
4586 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4587 * will be updated if necessary. This cannot be NULL.
4588 * @param fStepping Whether we're single-stepping guest execution and should
4589 * return VINF_EM_DBG_STEPPED if the event is injected
4590 * directly (registers modified by us, not by hardware on
4591 * VM-entry).
4592 */
4593static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4594 bool fStepping, uint32_t *pfIntrState)
4595{
4596 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4597 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4598 Assert(pfIntrState);
4599
4600#ifdef IN_NEM_DARWIN
4601 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4602#endif
4603
4604 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4605 uint32_t u32IntInfo = pEvent->u64IntInfo;
4606 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4607 uint32_t const cbInstr = pEvent->cbInstr;
4608 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4609 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4610 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4611
4612#ifdef VBOX_STRICT
4613 /*
4614 * Validate the error-code-valid bit for hardware exceptions.
4615 * No error codes for exceptions in real-mode.
4616 *
4617 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4618 */
4619 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4620 && !CPUMIsGuestInRealModeEx(pCtx))
4621 {
4622 switch (uVector)
4623 {
4624 case X86_XCPT_PF:
4625 case X86_XCPT_DF:
4626 case X86_XCPT_TS:
4627 case X86_XCPT_NP:
4628 case X86_XCPT_SS:
4629 case X86_XCPT_GP:
4630 case X86_XCPT_AC:
4631 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4632 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4633 RT_FALL_THRU();
4634 default:
4635 break;
4636 }
4637 }
4638
4639 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4640 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4641 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4642#endif
4643
4644 RT_NOREF(uVector);
4645 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4646 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4647 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4648 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4649 {
4650 Assert(uVector <= X86_XCPT_LAST);
4651 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4652 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4653 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4654 }
4655 else
4656 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4657
4658 /*
4659 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4660 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4661 * interrupt handler in the (real-mode) guest.
4662 *
4663 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4664 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4665 */
4666 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4667 {
4668#ifndef IN_NEM_DARWIN
4669 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4670#endif
4671 {
4672 /*
4673 * For CPUs with unrestricted guest execution enabled and with the guest
4674 * in real-mode, we must not set the deliver-error-code bit.
4675 *
4676 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4677 */
4678 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4679 }
4680#ifndef IN_NEM_DARWIN
4681 else
4682 {
4683 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4684 Assert(PDMVmmDevHeapIsEnabled(pVM));
4685 Assert(pVM->hm.s.vmx.pRealModeTSS);
4686 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4687
4688 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4689 int rc2 = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4690 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4691 AssertRCReturn(rc2, rc2);
4692
4693 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4694 size_t const cbIdtEntry = sizeof(X86IDTR16);
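            /* A real-mode IVT entry is 4 bytes: the 16-bit handler offset (IP) followed by the
               16-bit code-segment selector (CS), which is what X86IDTR16 maps onto. */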
4695 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4696 {
4697 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4698 if (uVector == X86_XCPT_DF)
4699 return VINF_EM_RESET;
4700
4701 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4702 No error codes for exceptions in real-mode. */
4703 if (uVector == X86_XCPT_GP)
4704 {
4705 static HMEVENT const s_EventXcptDf
4706 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4707 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4708 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4709 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4710 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
4711 }
4712
4713 /*
4714 * If we're injecting an event with no valid IDT entry, inject a #GP.
4715 * No error codes for exceptions in real-mode.
4716 *
4717 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4718 */
4719 static HMEVENT const s_EventXcptGp
4720 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4721 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4722 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4723 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4724 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
4725 }
4726
4727 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4728 uint16_t uGuestIp = pCtx->ip;
4729 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4730 {
4731 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4732 /* #BP and #OF are both benign traps; we need to resume at the next instruction. */
4733 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4734 }
4735 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4736 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4737
4738 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4739 X86IDTR16 IdtEntry;
4740 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4741 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4742 AssertRCReturn(rc2, rc2);
4743
4744 /* Construct the stack frame for the interrupt/exception handler. */
4745 VBOXSTRICTRC rcStrict;
4746 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, (uint16_t)pCtx->eflags.u);
4747 if (rcStrict == VINF_SUCCESS)
4748 {
4749 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4750 if (rcStrict == VINF_SUCCESS)
4751 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4752 }
4753
4754 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4755 if (rcStrict == VINF_SUCCESS)
4756 {
4757 pCtx->eflags.u &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4758 pCtx->rip = IdtEntry.offSel;
4759 pCtx->cs.Sel = IdtEntry.uSel;
4760 pCtx->cs.ValidSel = IdtEntry.uSel;
4761 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
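                /* Note: the real-mode code-segment base is selector << 4; cbIdtEntry (sizeof(X86IDTR16))
                   happens to equal 4, so the shift above produces the correct base. */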
4762 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4763 && uVector == X86_XCPT_PF)
4764 pCtx->cr2 = GCPtrFault;
4765
4766 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4767 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4768 | HM_CHANGED_GUEST_RSP);
4769
4770 /*
4771 * If we delivered a hardware exception (other than an NMI) and if there was
4772 * block-by-STI in effect, we should clear it.
4773 */
4774 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4775 {
4776 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4777 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4778 Log4Func(("Clearing inhibition due to STI\n"));
4779 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4780 }
4781
4782 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4783 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4784
4785 /*
4786 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4787 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4788 */
4789 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4790
4791 /*
4792 * If we eventually support nested-guest execution without unrestricted guest execution,
4793 * we should set fInterceptEvents here.
4794 */
4795 Assert(!fIsNestedGuest);
4796
4797 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4798 if (fStepping)
4799 rcStrict = VINF_EM_DBG_STEPPED;
4800 }
4801 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4802 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4803 return rcStrict;
4804 }
4805#else
4806 RT_NOREF(pVmcsInfo);
4807#endif
4808 }
4809
4810 /*
4811 * Validate.
4812 */
4813 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4814 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4815
4816 /*
4817 * Inject the event into the VMCS.
4818 */
4819 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4820 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4821 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4822 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4823 AssertRC(rc);
4824
4825 /*
4826 * Update guest CR2 if this is a page-fault.
4827 */
4828 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4829 pCtx->cr2 = GCPtrFault;
4830
4831 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4832 return VINF_SUCCESS;
4833}
4834
4835
4836/**
4837 * Evaluates the event to be delivered to the guest and sets it as the pending
4838 * event.
4839 *
4840 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4841 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4842 * NOT restore these force-flags.
4843 *
4844 * @returns Strict VBox status code (i.e. informational status codes too).
4845 * @param pVCpu The cross context virtual CPU structure.
4846 * @param pVmcsInfo The VMCS information structure.
4847 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
4848 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4849 */
4850static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4851{
4852 Assert(pfIntrState);
4853 Assert(!TRPMHasTrap(pVCpu));
4854
4855 /*
4856 * Compute/update guest-interruptibility state related FFs.
4857 * The FFs will be used below while evaluating events to be injected.
4858 */
4859 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
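    /* The returned value uses the VMCS guest-interruptibility state encoding: bit 0 is blocking
       by STI, bit 1 blocking by MOV SS, bit 2 blocking by SMI and bit 3 blocking by NMI. */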
4860
4861 /*
4862 * Evaluate if a new event needs to be injected.
4863 * An event that's already pending has already performed all necessary checks.
4864 */
4865 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4866 && !CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
4867 {
4868 /** @todo SMI. SMIs take priority over NMIs. */
4869
4870 /*
4871 * NMIs.
4872 * NMIs take priority over external interrupts.
4873 */
4874#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4875 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4876#endif
4877 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4878 {
4879 /*
4880 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4881 *
4882 * For a nested-guest, the FF always indicates the outer guest's ability to
4883 * receive an NMI while the guest-interruptibility state bit depends on whether
4884 * the nested-hypervisor is using virtual-NMIs.
4885 */
4886 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
4887 {
4888#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4889 if ( fIsNestedGuest
4890 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4891 return IEMExecVmxVmexitXcptNmi(pVCpu);
4892#endif
4893 vmxHCSetPendingXcptNmi(pVCpu);
4894 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4895 Log4Func(("NMI pending injection\n"));
4896
4897 /* We've injected the NMI, bail. */
4898 return VINF_SUCCESS;
4899 }
4900 if (!fIsNestedGuest)
4901 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4902 }
4903
4904 /*
4905 * External interrupts (PIC/APIC).
4906 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4907 * We cannot re-request the interrupt from the controller again.
4908 */
4909 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4910 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4911 {
4912 Assert(!DBGFIsStepping(pVCpu));
4913 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4914 AssertRC(rc);
4915
4916 /*
4917 * We must not check EFLAGS directly when executing a nested-guest, use
4918 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
4919 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4920 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4921 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4922 *
4923 * See Intel spec. 25.4.1 "Event Blocking".
4924 */
4925 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4926 {
4927#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4928 if ( fIsNestedGuest
4929 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4930 {
4931 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4932 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4933 return rcStrict;
4934 }
4935#endif
4936 uint8_t u8Interrupt;
4937 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4938 if (RT_SUCCESS(rc))
4939 {
4940#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4941 if ( fIsNestedGuest
4942 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4943 {
4944 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
4945 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4946 return rcStrict;
4947 }
4948#endif
4949 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4950 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
4951 }
4952 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4953 {
4954 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
4955
4956 if ( !fIsNestedGuest
4957 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
4958 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
4959 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
4960
4961 /*
4962 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4963 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4964 * need to re-set this force-flag here.
4965 */
4966 }
4967 else
4968 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4969
4970 /* We've injected the interrupt or taken necessary action, bail. */
4971 return VINF_SUCCESS;
4972 }
4973 if (!fIsNestedGuest)
4974 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4975 }
4976 }
4977 else if (!fIsNestedGuest)
4978 {
4979 /*
4980 * An event is being injected or we are in an interrupt shadow. Check if another event is
4981 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
4982 * the pending event.
4983 */
4984 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4985 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4986 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4987 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4988 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4989 }
4990 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
4991
4992 return VINF_SUCCESS;
4993}
4994
4995
4996/**
4997 * Injects any pending events into the guest if the guest is in a state to
4998 * receive them.
4999 *
5000 * @returns Strict VBox status code (i.e. informational status codes too).
5001 * @param pVCpu The cross context virtual CPU structure.
5002 * @param pVmcsInfo The VMCS information structure.
5003 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
5004 * @param fIntrState The VT-x guest-interruptibility state.
5005 * @param fStepping Whether we are single-stepping the guest using the
5006 * hypervisor debugger and should return
5007 * VINF_EM_DBG_STEPPED if the event was dispatched
5008 * directly.
5009 */
5010static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
5011 uint32_t fIntrState, bool fStepping)
5012{
5013 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
5014#ifndef IN_NEM_DARWIN
5015 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5016#endif
5017
5018#ifdef VBOX_STRICT
5019 /*
5020 * Verify guest-interruptibility state.
5021 *
5022 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
5023 * since injecting an event may modify the interruptibility state and we must thus always
5024 * use fIntrState.
5025 */
5026 {
5027 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
5028 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
5029 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
5030 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5031 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
5032 Assert(!TRPMHasTrap(pVCpu));
5033 NOREF(fBlockMovSS); NOREF(fBlockSti);
5034 }
5035#endif
5036
5037 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5038 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5039 {
5040 /*
5041 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
5042 * pending even while injecting an event and in this case, we want a VM-exit as soon as
5043 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
5044 *
5045 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5046 */
5047 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
5048#ifdef VBOX_STRICT
5049 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5050 {
5051 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
5052 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5053 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5054 }
5055 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5056 {
5057 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
5058 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5059 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5060 }
5061#endif
5062 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5063 uIntType));
5064
5065 /*
5066 * Inject the event and get any changes to the guest-interruptibility state.
5067 *
5068 * The guest-interruptibility state may need to be updated if we inject the event
5069 * into the guest IDT ourselves (e.g. when injecting software interrupts into a real-on-v86 guest).
5070 */
5071 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
5072 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
5073
5074 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5075 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
5076 else
5077 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
5078 }
5079
5080 /*
5081 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
5082 * is an interrupt shadow (block-by-STI or block-by-MOV SS).
5083 */
5084 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5085 && !fIsNestedGuest)
5086 {
5087 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
5088
5089 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5090 {
5091 /*
5092 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
5093 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
5094 */
5095 Assert(!DBGFIsStepping(pVCpu));
5096 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_TF);
5097 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
5098 fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
5099 AssertRC(rc);
5100 }
5101 else
5102 {
5103 /*
5104 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
5105 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
5106 * we take care of this case in vmxHCExportSharedDebugState, as well as the case where
5107 * we use MTF, so just make sure it's called before executing guest code.
5108 */
5109 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
5110 }
5111 }
5112 /* else: for nested-guests this is currently handled while merging VMCS controls. */
5113
5114 /*
5115 * Finally, update the guest-interruptibility state.
5116 *
5117 * This is required for the real-on-v86 software interrupt injection, for
5118 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
5119 */
5120 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
5121 AssertRC(rc);
5122
5123 /*
5124 * There's no need to clear the VM-entry interruption-information field here if we're not
5125 * injecting anything. VT-x clears the valid bit on every VM-exit.
5126 *
5127 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
5128 */
5129
5130 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
5131 return rcStrict;
5132}
5133
5134
5135/**
5136 * Tries to determine what part of the guest state VT-x has deemed invalid
5137 * and updates the error record fields accordingly.
5138 *
5139 * @returns VMX_IGS_* error codes.
5140 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
5141 * wrong with the guest state.
5142 *
5143 * @param pVCpu The cross context virtual CPU structure.
5144 * @param pVmcsInfo The VMCS info. object.
5145 *
5146 * @remarks This function assumes our cache of the VMCS controls
5147 * is valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
5148 */
5149static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
5150{
5151#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
5152#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { uError = (err); break; } else do { } while (0)
5153
5154 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5155 uint32_t uError = VMX_IGS_ERROR;
5156 uint32_t u32IntrState = 0;
5157#ifndef IN_NEM_DARWIN
5158 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5159 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
5160#else
5161 bool const fUnrestrictedGuest = true;
5162#endif
5163 do
5164 {
5165 int rc;
5166
5167 /*
5168 * Guest-interruptibility state.
5169 *
5170 * Read this first so that any check that fails before the ones that actually
5171 * require the guest-interruptibility state still reports the correct VMCS
5172 * value, avoiding further confusion.
5173 */
5174 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
5175 AssertRC(rc);
5176
5177 uint32_t u32Val;
5178 uint64_t u64Val;
5179
5180 /*
5181 * CR0.
5182 */
5183 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5184 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
5185 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
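        /* A bit that is 1 in u64Cr0Fixed0 must be 1 in CR0 and a bit that is 0 in u64Cr0Fixed1 must
           be 0, so the AND yields the must-be-one mask and the OR the may-be-one mask. */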
5186 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
5187 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
5188 if (fUnrestrictedGuest)
5189 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
5190
5191 uint64_t u64GuestCr0;
5192 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
5193 AssertRC(rc);
5194 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
5195 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
5196 if ( !fUnrestrictedGuest
5197 && (u64GuestCr0 & X86_CR0_PG)
5198 && !(u64GuestCr0 & X86_CR0_PE))
5199 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
5200
5201 /*
5202 * CR4.
5203 */
5204 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5205 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
5206 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
5207
5208 uint64_t u64GuestCr4;
5209 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
5210 AssertRC(rc);
5211 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
5212 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
5213
5214 /*
5215 * IA32_DEBUGCTL MSR.
5216 */
5217 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
5218 AssertRC(rc);
5219 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5220 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
5221 {
5222 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
5223 }
5224 uint64_t u64DebugCtlMsr = u64Val;
5225
5226#ifdef VBOX_STRICT
5227 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
5228 AssertRC(rc);
5229 Assert(u32Val == pVmcsInfo->u32EntryCtls);
5230#endif
5231 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5232
5233 /*
5234 * RIP and RFLAGS.
5235 */
5236 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
5237 AssertRC(rc);
5238 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
5239 if ( !fLongModeGuest
5240 || !pCtx->cs.Attr.n.u1Long)
5241 {
5242 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
5243 }
5244 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
5245 * must be identical if the "IA-32e mode guest" VM-entry
5246 * control is 1 and CS.L is 1. No check applies if the
5247 * CPU supports 64 linear-address bits. */
5248
5249 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
5250 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
5251 AssertRC(rc);
5252 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
5253 VMX_IGS_RFLAGS_RESERVED);
5254 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
5255 uint32_t const u32Eflags = u64Val;
5256
5257 if ( fLongModeGuest
5258 || ( fUnrestrictedGuest
5259 && !(u64GuestCr0 & X86_CR0_PE)))
5260 {
5261 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
5262 }
5263
5264 uint32_t u32EntryInfo;
5265 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
5266 AssertRC(rc);
5267 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5268 {
5269 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
5270 }
5271
5272 /*
5273 * 64-bit checks.
5274 */
5275 if (fLongModeGuest)
5276 {
5277 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
5278 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
5279 }
5280
5281 if ( !fLongModeGuest
5282 && (u64GuestCr4 & X86_CR4_PCIDE))
5283 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
5284
5285 /** @todo CR3 field must be such that bits 63:52 and bits in the range
5286 * 51:32 beyond the processor's physical-address width are 0. */
5287
5288 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5289 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
5290 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
5291
5292#ifndef IN_NEM_DARWIN
5293 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
5294 AssertRC(rc);
5295 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
5296
5297 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
5298 AssertRC(rc);
5299 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
5300#endif
5301
5302 /*
5303 * PERF_GLOBAL MSR.
5304 */
5305 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
5306 {
5307 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
5308 AssertRC(rc);
5309 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
5310 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
5311 }
5312
5313 /*
5314 * PAT MSR.
5315 */
5316 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5317 {
5318 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
5319 AssertRC(rc);
5320 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xf8f8f8f8f8f8f8f8)), VMX_IGS_PAT_MSR_RESERVED); /* Bits 7:3 of each PAT entry MBZ. */
5321 for (unsigned i = 0; i < 8; i++)
5322 {
5323 uint8_t u8Val = (u64Val & 0xff);
5324 if ( u8Val != 0 /* UC */
5325 && u8Val != 1 /* WC */
5326 && u8Val != 4 /* WT */
5327 && u8Val != 5 /* WP */
5328 && u8Val != 6 /* WB */
5329 && u8Val != 7 /* UC- */)
5330 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
5331 u64Val >>= 8;
5332 }
5333 }
5334
5335 /*
5336 * EFER MSR.
5337 */
5338 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5339 {
5340 Assert(g_fHmVmxSupportsVmcsEfer);
5341 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
5342 AssertRC(rc);
5343 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
5344 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
5345 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
5346 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
5347 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
5348 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
5349 * iemVmxVmentryCheckGuestState(). */
5350 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5351 || !(u64GuestCr0 & X86_CR0_PG)
5352 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
5353 VMX_IGS_EFER_LMA_LME_MISMATCH);
5354 }
5355
5356 /*
5357 * Segment registers.
5358 */
5359 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5360 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
5361 if (!(u32Eflags & X86_EFL_VM))
5362 {
5363 /* CS */
5364 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
5365 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
5366 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
5367 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5368 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5369 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
5370 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5371 /* CS cannot be loaded with NULL in protected mode. */
5372 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
5373 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
5374 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5375 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
5376 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5377 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
5378 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
5379 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
5380 else
5381 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
5382
5383 /* SS */
5384 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5385 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
5386 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
5387 if ( !(pCtx->cr0 & X86_CR0_PE)
5388 || pCtx->cs.Attr.n.u4Type == 3)
5389 {
5390 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
5391 }
5392
5393 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5394 {
5395 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
5396 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
5397 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
5398 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
5399 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5400 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5401 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
5402 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5403 }
5404
5405 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
5406 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5407 {
5408 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
5409 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
5410 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5411 || pCtx->ds.Attr.n.u4Type > 11
5412 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5413 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
5414 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
5415 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5416 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5417 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
5418 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5419 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5420 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
5421 }
5422 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5423 {
5424 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
5425 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
5426 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5427 || pCtx->es.Attr.n.u4Type > 11
5428 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5429 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5430 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5431 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5432 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5433 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5434 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5435 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5436 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5437 }
5438 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5439 {
5440 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5441 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5442 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5443 || pCtx->fs.Attr.n.u4Type > 11
5444 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5445 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5446 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5447 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5448 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5449 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5450 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5451 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5452 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5453 }
5454 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5455 {
5456 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5457 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5458 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5459 || pCtx->gs.Attr.n.u4Type > 11
5460 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5461 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5462 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5463 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5464 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5465 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5466 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5467 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5468 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5469 }
5470 /* 64-bit capable CPUs. */
5471 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5472 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5473 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5474 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5475 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5476 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5477 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5478 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5479 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5480 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5481 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5482 }
5483 else
5484 {
5485 /* V86 mode checks. */
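            /* In virtual-8086 mode the segment attributes must be exactly 0xf3: a present,
               DPL-3, read/write accessed data segment. */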
5486 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5487 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5488 {
5489 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5490 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5491 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5492 }
5493 else
5494 {
5495 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5496 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5497 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5498 }
5499
5500 /* CS */
5501 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5502 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5503 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5504 /* SS */
5505 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5506 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5507 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5508 /* DS */
5509 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5510 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5511 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5512 /* ES */
5513 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5514 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5515 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5516 /* FS */
5517 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5518 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5519 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5520 /* GS */
5521 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5522 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5523 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5524 /* 64-bit capable CPUs. */
5525 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5526 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5527 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5528 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5529 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5530 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5531 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5532 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5533 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5534 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5535 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5536 }
5537
5538 /*
5539 * TR.
5540 */
5541 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5542 /* 64-bit capable CPUs. */
5543 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5544 if (fLongModeGuest)
5545 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5546 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5547 else
5548 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5549 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5550 VMX_IGS_TR_ATTR_TYPE_INVALID);
5551 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5552 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5553 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5554 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5555 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5556 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5557 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5558 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5559
5560 /*
5561 * GDTR and IDTR (64-bit capable checks).
5562 */
5563 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5564 AssertRC(rc);
5565 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5566
5567 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5568 AssertRC(rc);
5569 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5570
5571 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5572 AssertRC(rc);
5573 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5574
5575 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5576 AssertRC(rc);
5577 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5578
5579 /*
5580 * Guest Non-Register State.
5581 */
5582 /* Activity State. */
5583 uint32_t u32ActivityState;
5584 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5585 AssertRC(rc);
5586 HMVMX_CHECK_BREAK( !u32ActivityState
5587 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5588 VMX_IGS_ACTIVITY_STATE_INVALID);
5589 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5590 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5591
5592 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5593 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5594 {
5595 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5596 }
5597
5598 /** @todo Activity state and injecting interrupts. Left as a todo since we
5599 * currently don't use any activity state other than ACTIVE. */
5600
5601 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5602 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5603
5604 /* Guest interruptibility-state. */
5605 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5606 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5607 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5608 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5609 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5610 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5611 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5612 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5613 {
5614 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5615 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5616 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5617 }
5618 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5619 {
5620 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5621 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5622 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5623 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5624 }
5625 /** @todo Assumes the processor is not in SMM. */
5626 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5627 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5628 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5629 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5630 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5631 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5632 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5633 {
5634 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5635 }
5636
5637 /* Pending debug exceptions. */
5638 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5639 AssertRC(rc);
5640 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5641 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5642 u32Val = u64Val; /* For pending debug exceptions checks below. */
5643
5644 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5645 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5646 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5647 {
5648 if ( (u32Eflags & X86_EFL_TF)
5649 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5650 {
5651 /* Bit 14 is PendingDebug.BS. */
5652 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5653 }
5654 if ( !(u32Eflags & X86_EFL_TF)
5655 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5656 {
5657 /* Bit 14 is PendingDebug.BS. */
5658 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5659 }
5660 }
5661
5662#ifndef IN_NEM_DARWIN
5663 /* VMCS link pointer. */
5664 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5665 AssertRC(rc);
5666 if (u64Val != UINT64_C(0xffffffffffffffff))
5667 {
5668 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5669 /** @todo Bits beyond the processor's physical-address width MBZ. */
5670 /** @todo SMM checks. */
5671 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5672 Assert(pVmcsInfo->pvShadowVmcs);
5673 VMXVMCSREVID VmcsRevId;
5674 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5675 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5676 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5677 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5678 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5679 }
5680
5681 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5682 * not using nested paging? */
5683 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5684 && !fLongModeGuest
5685 && CPUMIsGuestInPAEModeEx(pCtx))
5686 {
5687 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5688 AssertRC(rc);
5689 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5690
5691 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5692 AssertRC(rc);
5693 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5694
5695 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5696 AssertRC(rc);
5697 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5698
5699 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5700 AssertRC(rc);
5701 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5702 }
5703#endif
5704
5705 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5706 if (uError == VMX_IGS_ERROR)
5707 uError = VMX_IGS_REASON_NOT_FOUND;
5708 } while (0);
5709
5710 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5711 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5712 return uError;
5713
5714#undef HMVMX_ERROR_BREAK
5715#undef HMVMX_CHECK_BREAK
5716}
5717
5718
5719#ifndef HMVMX_USE_FUNCTION_TABLE
5720/**
5721 * Handles a guest VM-exit from hardware-assisted VMX execution.
5722 *
5723 * @returns Strict VBox status code (i.e. informational status codes too).
5724 * @param pVCpu The cross context virtual CPU structure.
5725 * @param pVmxTransient The VMX-transient structure.
5726 */
5727DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5728{
5729#ifdef DEBUG_ramshankar
5730# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5731 do { \
5732 if (a_fSave != 0) \
5733 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5734 VBOXSTRICTRC rcStrict = a_CallExpr; \
5735 if (a_fSave != 0) \
5736 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5737 return rcStrict; \
5738 } while (0)
5739#else
5740# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5741#endif
5742 uint32_t const uExitReason = pVmxTransient->uExitReason;
5743 switch (uExitReason)
5744 {
5745 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5746 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5747 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5748 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5749 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5750 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5751 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5752 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5753 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5754 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5755 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5756 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5757 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5758 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5759 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5760 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5761 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5762 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5763 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5764 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5765 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5766 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5767 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5768 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5769 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5770 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5771 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5772 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5773 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5774 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5775#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5776 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5777 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5778 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5779 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5780 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5781            case VMX_EXIT_VMRESUME:                VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5782            case VMX_EXIT_VMWRITE:                 VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5783 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5784 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5785 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5786#else
5787 case VMX_EXIT_VMCLEAR:
5788 case VMX_EXIT_VMLAUNCH:
5789 case VMX_EXIT_VMPTRLD:
5790 case VMX_EXIT_VMPTRST:
5791 case VMX_EXIT_VMREAD:
5792 case VMX_EXIT_VMRESUME:
5793 case VMX_EXIT_VMWRITE:
5794 case VMX_EXIT_VMXOFF:
5795 case VMX_EXIT_VMXON:
5796 case VMX_EXIT_INVVPID:
5797 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5798#endif
5799#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5800 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5801#else
5802 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5803#endif
5804
5805 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5806 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5807 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5808
5809 case VMX_EXIT_INIT_SIGNAL:
5810 case VMX_EXIT_SIPI:
5811 case VMX_EXIT_IO_SMI:
5812 case VMX_EXIT_SMI:
5813 case VMX_EXIT_ERR_MSR_LOAD:
5814 case VMX_EXIT_ERR_MACHINE_CHECK:
5815 case VMX_EXIT_PML_FULL:
5816 case VMX_EXIT_VIRTUALIZED_EOI:
5817 case VMX_EXIT_GDTR_IDTR_ACCESS:
5818 case VMX_EXIT_LDTR_TR_ACCESS:
5819 case VMX_EXIT_APIC_WRITE:
5820 case VMX_EXIT_RDRAND:
5821 case VMX_EXIT_RSM:
5822 case VMX_EXIT_VMFUNC:
5823 case VMX_EXIT_ENCLS:
5824 case VMX_EXIT_RDSEED:
5825 case VMX_EXIT_XSAVES:
5826 case VMX_EXIT_XRSTORS:
5827 case VMX_EXIT_UMWAIT:
5828 case VMX_EXIT_TPAUSE:
5829 case VMX_EXIT_LOADIWKEY:
5830 default:
5831 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5832 }
5833#undef VMEXIT_CALL_RET
5834}
5835#endif /* !HMVMX_USE_FUNCTION_TABLE */
5836
5837
5838#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5839/**
5840 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5841 *
5842 * @returns Strict VBox status code (i.e. informational status codes too).
5843 * @param pVCpu The cross context virtual CPU structure.
5844 * @param pVmxTransient The VMX-transient structure.
5845 */
5846DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5847{
5848#ifdef DEBUG_ramshankar
5849# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5850 do { \
5851 if (a_fSave != 0) \
5852 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5853 VBOXSTRICTRC rcStrict = a_CallExpr; \
5854 return rcStrict; \
5855 } while (0)
5856#else
5857# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5858#endif
5859
5860 uint32_t const uExitReason = pVmxTransient->uExitReason;
5861 switch (uExitReason)
5862 {
5863# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5864 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient));
5865 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolationNested(pVCpu, pVmxTransient));
5866# else
5867 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5868 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5869# endif
5870 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient));
5871 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstrNested(pVCpu, pVmxTransient));
5872 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHltNested(pVCpu, pVmxTransient));
5873
5874 /*
5875 * We shouldn't direct host physical interrupts to the nested-guest.
5876 */
5877 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5878
5879 /*
5880 * Instructions that cause VM-exits unconditionally or the condition is
5881 * always taken solely from the nested hypervisor (meaning if the VM-exit
5882 * happens, it's guaranteed to be a nested-guest VM-exit).
5883 *
5884 * - Provides VM-exit instruction length ONLY.
5885 */
5886 case VMX_EXIT_CPUID: /* Unconditional. */
5887 case VMX_EXIT_VMCALL:
5888 case VMX_EXIT_GETSEC:
5889 case VMX_EXIT_INVD:
5890 case VMX_EXIT_XSETBV:
5891 case VMX_EXIT_VMLAUNCH:
5892 case VMX_EXIT_VMRESUME:
5893 case VMX_EXIT_VMXOFF:
5894 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5895 case VMX_EXIT_VMFUNC:
5896 VMEXIT_CALL_RET(0, vmxHCExitInstrNested(pVCpu, pVmxTransient));
5897
5898 /*
5899 * Instructions that cause VM-exits unconditionally or the condition is
5900 * always taken solely from the nested hypervisor (meaning if the VM-exit
5901 * happens, it's guaranteed to be a nested-guest VM-exit).
5902 *
5903 * - Provides VM-exit instruction length.
5904 * - Provides VM-exit information.
5905 * - Optionally provides Exit qualification.
5906 *
5907 * Since Exit qualification is 0 for all VM-exits where it is not
5908 * applicable, reading and passing it to the guest should produce
5909 * defined behavior.
5910 *
5911 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5912 */
5913 case VMX_EXIT_INVEPT: /* Unconditional. */
5914 case VMX_EXIT_INVVPID:
5915 case VMX_EXIT_VMCLEAR:
5916 case VMX_EXIT_VMPTRLD:
5917 case VMX_EXIT_VMPTRST:
5918 case VMX_EXIT_VMXON:
5919 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5920 case VMX_EXIT_LDTR_TR_ACCESS:
5921 case VMX_EXIT_RDRAND:
5922 case VMX_EXIT_RDSEED:
5923 case VMX_EXIT_XSAVES:
5924 case VMX_EXIT_XRSTORS:
5925 case VMX_EXIT_UMWAIT:
5926 case VMX_EXIT_TPAUSE:
5927 VMEXIT_CALL_RET(0, vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient));
5928
5929 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtscNested(pVCpu, pVmxTransient));
5930 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscpNested(pVCpu, pVmxTransient));
5931 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsrNested(pVCpu, pVmxTransient));
5932 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsrNested(pVCpu, pVmxTransient));
5933 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpgNested(pVCpu, pVmxTransient));
5934 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcidNested(pVCpu, pVmxTransient));
5935 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient));
5936 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvdNested(pVCpu, pVmxTransient));
5937 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtfNested(pVCpu, pVmxTransient));
5938 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccessNested(pVCpu, pVmxTransient));
5939 case VMX_EXIT_APIC_WRITE: VMEXIT_CALL_RET(0, vmxHCExitApicWriteNested(pVCpu, pVmxTransient));
5940 case VMX_EXIT_VIRTUALIZED_EOI: VMEXIT_CALL_RET(0, vmxHCExitVirtEoiNested(pVCpu, pVmxTransient));
5941 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRxNested(pVCpu, pVmxTransient));
5942 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindowNested(pVCpu, pVmxTransient));
5943 case VMX_EXIT_NMI_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitNmiWindowNested(pVCpu, pVmxTransient));
5944 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient));
5945 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwaitNested(pVCpu, pVmxTransient));
5946 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitorNested(pVCpu, pVmxTransient));
5947 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPauseNested(pVCpu, pVmxTransient));
5948
5949 case VMX_EXIT_PREEMPT_TIMER:
5950 {
5951 /** @todo NSTVMX: Preempt timer. */
5952 VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5953 }
5954
5955 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRxNested(pVCpu, pVmxTransient));
5956 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmcNested(pVCpu, pVmxTransient));
5957
5958 case VMX_EXIT_VMREAD:
5959 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient));
5960
5961 case VMX_EXIT_TRIPLE_FAULT: VMEXIT_CALL_RET(0, vmxHCExitTripleFaultNested(pVCpu, pVmxTransient));
5962 case VMX_EXIT_ERR_INVALID_GUEST_STATE: VMEXIT_CALL_RET(0, vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient));
5963
5964 case VMX_EXIT_INIT_SIGNAL:
5965 case VMX_EXIT_SIPI:
5966 case VMX_EXIT_IO_SMI:
5967 case VMX_EXIT_SMI:
5968 case VMX_EXIT_ERR_MSR_LOAD:
5969 case VMX_EXIT_ERR_MACHINE_CHECK:
5970 case VMX_EXIT_PML_FULL:
5971 case VMX_EXIT_RSM:
5972 default:
5973 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5974 }
5975#undef VMEXIT_CALL_RET
5976}
5977#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5978
5979
5980/** @name VM-exit helpers.
5981 * @{
5982 */
5983/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5984/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
5985/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5986
5987/** Macro for VM-exits called unexpectedly. */
5988#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
5989 do { \
5990 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
5991 return VERR_VMX_UNEXPECTED_EXIT; \
5992 } while (0)
5993
5994#ifdef VBOX_STRICT
5995# ifndef IN_NEM_DARWIN
5996/* Is there some generic IPRT define for this that is not in Runtime/internal/\*? */
5997# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
5998 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
5999
6000# define HMVMX_ASSERT_PREEMPT_CPUID() \
6001 do { \
6002 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6003 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6004 } while (0)
6005
6006# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6007 do { \
6008 AssertPtr((a_pVCpu)); \
6009 AssertPtr((a_pVmxTransient)); \
6010 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6011 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6012 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6013 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6014 Assert((a_pVmxTransient)->pVmcsInfo); \
6015 Assert(ASMIntAreEnabled()); \
6016 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6017 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
6018 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6019 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6020 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
6021 HMVMX_ASSERT_PREEMPT_CPUID(); \
6022 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6023 } while (0)
6024# else
6025# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
6026# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
6027# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6028 do { \
6029 AssertPtr((a_pVCpu)); \
6030 AssertPtr((a_pVmxTransient)); \
6031 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6032 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6033 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6034 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6035 Assert((a_pVmxTransient)->pVmcsInfo); \
6036 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6037 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6038 } while (0)
6039# endif
6040
6041# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6042 do { \
6043 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
6044 Assert((a_pVmxTransient)->fIsNestedGuest); \
6045 } while (0)
6046
6047# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6048 do { \
6049 Log4Func(("\n")); \
6050 } while (0)
6051#else
6052# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6053 do { \
6054 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6055 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
6056 } while (0)
6057
6058# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6059 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
6060
6061# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
6062#endif
6063
6064#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6065/** Macro that performs the necessary privilege checks for VM-exits that occurred because the
6066 *  guest attempted to execute a VMX instruction, raising \#UD and returning if the checks fail. */
6067# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
6068 do \
6069 { \
6070 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
6071 if (rcStrictTmp == VINF_SUCCESS) \
6072 { /* likely */ } \
6073 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6074 { \
6075 Assert((a_pVCpu)->hm.s.Event.fPending); \
6076 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
6077 return VINF_SUCCESS; \
6078 } \
6079 else \
6080 { \
6081 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
6082 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
6083 } \
6084 } while (0)
6085
6086/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
6087# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
6088 do \
6089 { \
6090 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
6091 (a_pGCPtrEffAddr)); \
6092 if (rcStrictTmp == VINF_SUCCESS) \
6093 { /* likely */ } \
6094 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6095 { \
6096 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
6097 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
6098 NOREF(uXcptTmp); \
6099 return VINF_SUCCESS; \
6100 } \
6101 else \
6102 { \
6103 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
6104 return rcStrictTmp; \
6105 } \
6106 } while (0)
6107#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6108
6109
6110/**
6111 * Advances the guest RIP by the specified number of bytes.
6112 *
6113 * @param pVCpu The cross context virtual CPU structure.
6114 * @param cbInstr Number of bytes to advance the RIP by.
6115 *
6116 * @remarks No-long-jump zone!!!
6117 */
6118DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
6119{
6120 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
6121
6122 /*
6123 * Advance RIP.
6124 *
6125 * The upper 32 bits are only set when in 64-bit mode, so we have to detect
6126 * when the addition causes a "carry" into the upper half and check whether
6127     * we're in 64-bit and can go on with it or whether we should zap the top
6128 * half. (Note! The 8086, 80186 and 80286 emulation is done exclusively in
6129 * IEM, so we don't need to bother with pre-386 16-bit wraparound.)
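     * For example, a 32-bit guest at EIP 0xfffffffe executing a 4 byte instruction
     * yields uRipNext = 0x100000002 below; bit 32 differs from uRipPrev, and since
     * the guest is not in 64-bit mode the result is truncated to 0x00000002.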
6130 *
6131 * See PC wrap around tests in bs3-cpu-weird-1.
6132 */
6133 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
6134 uint64_t const uRipNext = uRipPrev + cbInstr;
6135 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & RT_BIT_64(32))
6136 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx)))
6137 pVCpu->cpum.GstCtx.rip = uRipNext;
6138 else
6139 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
6140
6141 /*
6142 * Clear RF and interrupt shadowing.
6143 */
6144 if (RT_LIKELY(!(pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF))))
6145 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_INHIBIT_SHADOW;
6146 else
6147 {
6148 if ((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF)) == X86_EFL_TF)
6149 {
6150 /** @todo \#DB - single step. */
6151 }
6152 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
6153 }
6154 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
6155
6156 /* Mark both RIP and RFLAGS as updated. */
6157 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6158}
6159
6160
6161/**
6162 * Advances the guest RIP after reading it from the VMCS.
6163 *
6164 * @returns VBox status code, no informational status codes.
6165 * @param pVCpu The cross context virtual CPU structure.
6166 * @param pVmxTransient The VMX-transient structure.
6167 *
6168 * @remarks No-long-jump zone!!!
6169 */
6170static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6171{
6172 vmxHCReadToTransientSlow<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
6173 /** @todo consider template here after checking callers. */
6174 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
6175 AssertRCReturn(rc, rc);
6176
6177 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
6178 return VINF_SUCCESS;
6179}
6180
6181
6182/**
6183 * Handle a condition that occurred while delivering an event through the guest or
6184 * nested-guest IDT.
6185 *
6186 * @returns Strict VBox status code (i.e. informational status codes too).
6187 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6188 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
6189 *          to continue execution of the guest which will deliver the \#DF.
6190 * @retval VINF_EM_RESET if we detected a triple-fault condition.
6191 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
6192 *
6193 * @param pVCpu The cross context virtual CPU structure.
6194 * @param pVmxTransient The VMX-transient structure.
6195 *
6196 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6197 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
6198 * is due to an EPT violation, PML full or SPP-related event.
6199 *
6200 * @remarks No-long-jump zone!!!
6201 */
6202static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6203{
6204 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6205 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6206 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6207 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6208 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6209 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
6210
6211 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
6212 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6213 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
6214 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
6215 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
6216 {
6217 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
6218 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
6219
6220 /*
6221 * If the event was a software interrupt (generated with INT n) or a software exception
6222 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
6223 * can handle the VM-exit and continue guest execution which will re-execute the
6224 * instruction rather than re-injecting the exception, as that can cause premature
6225 * trips to ring-3 before injection and involve TRPM which currently has no way of
6226 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
6227 * the problem).
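     * For example, if delivering an INT3's \#BP through the IDT caused an EPT violation,
     * the INT3 is simply re-executed once the VM-exit has been handled.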
6228 */
6229 IEMXCPTRAISE enmRaise;
6230 IEMXCPTRAISEINFO fRaiseInfo;
6231 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6232 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6233 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6234 {
6235 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
6236 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6237 }
6238 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
6239 {
6240 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
6241 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
6242 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
6243
6244 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
6245 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
6246
6247 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
6248
6249 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
6250 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
6251 {
6252 pVmxTransient->fVectoringPF = true;
6253 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6254 }
6255 }
6256 else
6257 {
6258 /*
6259 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
6260 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
6261 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
6262 */
6263 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6264 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6265 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
6266 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6267 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6268 }
6269
6270 /*
6271 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
6272 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
6273 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
6274 * subsequent VM-entry would fail, see @bugref{7445}.
6275 *
6276 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
6277 */
6278 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6279 && enmRaise == IEMXCPTRAISE_PREV_EVENT
6280 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6281 && CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6282 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6283
6284 switch (enmRaise)
6285 {
6286 case IEMXCPTRAISE_CURRENT_XCPT:
6287 {
6288 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
6289 Assert(rcStrict == VINF_SUCCESS);
6290 break;
6291 }
6292
6293 case IEMXCPTRAISE_PREV_EVENT:
6294 {
6295 uint32_t u32ErrCode;
6296 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
6297 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
6298 else
6299 u32ErrCode = 0;
6300
6301 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
6302 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
6303 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
6304 pVCpu->cpum.GstCtx.cr2);
6305
6306 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6307 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
6308 Assert(rcStrict == VINF_SUCCESS);
6309 break;
6310 }
6311
6312 case IEMXCPTRAISE_REEXEC_INSTR:
6313 Assert(rcStrict == VINF_SUCCESS);
6314 break;
6315
6316 case IEMXCPTRAISE_DOUBLE_FAULT:
6317 {
6318 /*
6319 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
6320 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
6321 */
6322 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6323 {
6324 pVmxTransient->fVectoringDoublePF = true;
6325 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6326 pVCpu->cpum.GstCtx.cr2));
6327 rcStrict = VINF_SUCCESS;
6328 }
6329 else
6330 {
6331 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
6332 vmxHCSetPendingXcptDF(pVCpu);
6333 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6334 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6335 rcStrict = VINF_HM_DOUBLE_FAULT;
6336 }
6337 break;
6338 }
6339
6340 case IEMXCPTRAISE_TRIPLE_FAULT:
6341 {
6342 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
6343 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6344 rcStrict = VINF_EM_RESET;
6345 break;
6346 }
6347
6348 case IEMXCPTRAISE_CPU_HANG:
6349 {
6350 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
6351 rcStrict = VERR_EM_GUEST_CPU_HANG;
6352 break;
6353 }
6354
6355 default:
6356 {
6357 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
6358 rcStrict = VERR_VMX_IPE_2;
6359 break;
6360 }
6361 }
6362 }
6363 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6364 && !CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6365 {
6366 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
6367 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
6368 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
6369 {
6370 /*
6371             * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
6372 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6373 * that virtual NMIs remain blocked until the IRET execution is completed.
6374 *
6375 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
6376 */
6377 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6378 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6379 }
6380 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6381 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6382 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6383 {
6384 /*
6385 * Execution of IRET caused an EPT violation, page-modification log-full event or
6386 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
6387 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6388 * that virtual NMIs remain blocked until the IRET execution is completed.
6389 *
6390 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
6391 */
6392 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
6393 {
6394 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6395 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6396 }
6397 }
6398 }
6399
6400 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
6401 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
6402 return rcStrict;
6403}
6404
6405
6406#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6407/**
6408 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
6409 * guest attempting to execute a VMX instruction.
6410 *
6411 * @returns Strict VBox status code (i.e. informational status codes too).
6412 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6413 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6414 *
6415 * @param pVCpu The cross context virtual CPU structure.
6416 * @param uExitReason The VM-exit reason.
6417 *
6418 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
6419 * @remarks No-long-jump zone!!!
6420 */
6421static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
6422{
6423 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6424 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6425
6426 /*
6427 * The physical CPU would have already checked the CPU mode/code segment.
6428 * We shall just assert here for paranoia.
6429 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
6430 */
6431 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6432 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6433 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
6434
6435 if (uExitReason == VMX_EXIT_VMXON)
6436 {
6437 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6438
6439 /*
6440 * We check CR4.VMXE because it is required to be always set while in VMX operation
6441 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
6442 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6443 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6444 */
6445 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6446 {
6447 Log4Func(("CR4.VMXE is not set -> #UD\n"));
6448 vmxHCSetPendingXcptUD(pVCpu);
6449 return VINF_HM_PENDING_XCPT;
6450 }
6451 }
6452 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
6453 {
6454 /*
6455 * The guest has not entered VMX operation but attempted to execute a VMX instruction
6456         * (other than VMXON), so we need to raise a #UD.
6457 */
6458 Log4Func(("Not in VMX root mode -> #UD\n"));
6459 vmxHCSetPendingXcptUD(pVCpu);
6460 return VINF_HM_PENDING_XCPT;
6461 }
6462
6463 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
6464 return VINF_SUCCESS;
6465}
6466
6467
6468/**
6469 * Decodes the memory operand of an instruction that caused a VM-exit.
6470 *
6471 * The Exit qualification field provides the displacement field for memory
6472 * operand instructions, if any.
6473 *
6474 * @returns Strict VBox status code (i.e. informational status codes too).
6475 * @retval VINF_SUCCESS if the operand was successfully decoded.
6476 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6477 * operand.
6478 * @param pVCpu The cross context virtual CPU structure.
6479 * @param uExitInstrInfo The VM-exit instruction information field.
6480 * @param enmMemAccess The memory operand's access type (read or write).
6481 * @param GCPtrDisp The instruction displacement field, if any. For
6482 * RIP-relative addressing pass RIP + displacement here.
6483 * @param pGCPtrMem Where to store the effective destination memory address.
6484 *
6485 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6486 * virtual-8086 mode hence skips those checks while verifying if the
6487 * segment is valid.
6488 */
6489static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6490 PRTGCPTR pGCPtrMem)
6491{
6492 Assert(pGCPtrMem);
6493 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6494 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6495 | CPUMCTX_EXTRN_CR0);
6496
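    /* Both tables are indexed by the address-size encoding of the VM-exit instruction-information
       field: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit. */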
6497 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6498 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6499 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6500
6501 VMXEXITINSTRINFO ExitInstrInfo;
6502 ExitInstrInfo.u = uExitInstrInfo;
6503 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6504 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6505 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6506 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6507 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6508 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6509 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6510 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6511 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6512
6513 /*
6514 * Validate instruction information.
6515     * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6516 */
6517 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6518 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6519 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6520 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6521 AssertLogRelMsgReturn(fIsMemOperand,
6522 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6523
6524 /*
6525 * Compute the complete effective address.
6526 *
6527 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6528 * See AMD spec. 4.5.2 "Segment Registers".
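     * I.e. GCPtrMem = Displacement + Base + (Index << Scale), plus the applicable
     * segment base, masked to the instruction's address size.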
6529 */
6530 RTGCPTR GCPtrMem = GCPtrDisp;
6531 if (fBaseRegValid)
6532 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6533 if (fIdxRegValid)
6534 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6535
6536 RTGCPTR const GCPtrOff = GCPtrMem;
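    /* Outside 64-bit mode every segment base applies; in 64-bit mode only the FS and GS bases are added. */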
6537 if ( !fIsLongMode
6538 || iSegReg >= X86_SREG_FS)
6539 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6540 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6541
6542 /*
6543 * Validate effective address.
6544 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6545 */
6546 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6547 Assert(cbAccess > 0);
6548 if (fIsLongMode)
6549 {
6550 if (X86_IS_CANONICAL(GCPtrMem))
6551 {
6552 *pGCPtrMem = GCPtrMem;
6553 return VINF_SUCCESS;
6554 }
6555
6556 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6557 * "Data Limit Checks in 64-bit Mode". */
6558 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6559 vmxHCSetPendingXcptGP(pVCpu, 0);
6560 return VINF_HM_PENDING_XCPT;
6561 }
6562
6563 /*
6564 * This is a watered down version of iemMemApplySegment().
6565 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6566 * and segment CPL/DPL checks are skipped.
6567 */
6568 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6569 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6570 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6571
6572 /* Check if the segment is present and usable. */
6573 if ( pSel->Attr.n.u1Present
6574 && !pSel->Attr.n.u1Unusable)
6575 {
6576 Assert(pSel->Attr.n.u1DescType);
6577 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6578 {
6579 /* Check permissions for the data segment. */
6580 if ( enmMemAccess == VMXMEMACCESS_WRITE
6581 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6582 {
6583 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6584 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6585 return VINF_HM_PENDING_XCPT;
6586 }
6587
6588 /* Check limits if it's a normal data segment. */
6589 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6590 {
6591 if ( GCPtrFirst32 > pSel->u32Limit
6592 || GCPtrLast32 > pSel->u32Limit)
6593 {
6594 Log4Func(("Data segment limit exceeded. "
6595 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6596 GCPtrLast32, pSel->u32Limit));
6597 if (iSegReg == X86_SREG_SS)
6598 vmxHCSetPendingXcptSS(pVCpu, 0);
6599 else
6600 vmxHCSetPendingXcptGP(pVCpu, 0);
6601 return VINF_HM_PENDING_XCPT;
6602 }
6603 }
6604 else
6605 {
6606 /* Check limits if it's an expand-down data segment.
6607 Note! The upper boundary is defined by the B bit, not the G bit! */
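                    /* I.e. the valid offset range is (u32Limit + 1) up to 0xffffffff (B=1) or 0xffff (B=0). */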
6608 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6609 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6610 {
6611 Log4Func(("Expand-down data segment limit exceeded. "
6612 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6613 GCPtrLast32, pSel->u32Limit));
6614 if (iSegReg == X86_SREG_SS)
6615 vmxHCSetPendingXcptSS(pVCpu, 0);
6616 else
6617 vmxHCSetPendingXcptGP(pVCpu, 0);
6618 return VINF_HM_PENDING_XCPT;
6619 }
6620 }
6621 }
6622 else
6623 {
6624 /* Check permissions for the code segment. */
6625 if ( enmMemAccess == VMXMEMACCESS_WRITE
6626 || ( enmMemAccess == VMXMEMACCESS_READ
6627 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6628 {
6629 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6630 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6631 vmxHCSetPendingXcptGP(pVCpu, 0);
6632 return VINF_HM_PENDING_XCPT;
6633 }
6634
6635 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6636 if ( GCPtrFirst32 > pSel->u32Limit
6637 || GCPtrLast32 > pSel->u32Limit)
6638 {
6639 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6640 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6641 if (iSegReg == X86_SREG_SS)
6642 vmxHCSetPendingXcptSS(pVCpu, 0);
6643 else
6644 vmxHCSetPendingXcptGP(pVCpu, 0);
6645 return VINF_HM_PENDING_XCPT;
6646 }
6647 }
6648 }
6649 else
6650 {
6651 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6652 vmxHCSetPendingXcptGP(pVCpu, 0);
6653 return VINF_HM_PENDING_XCPT;
6654 }
6655
6656 *pGCPtrMem = GCPtrMem;
6657 return VINF_SUCCESS;
6658}
6659#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6660
6661
6662/**
6663 * VM-exit helper for LMSW.
6664 */
6665static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6666{
6667 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6668 AssertRCReturn(rc, rc);
6669
6670 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6671 AssertMsg( rcStrict == VINF_SUCCESS
6672 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6673
6674 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6675 if (rcStrict == VINF_IEM_RAISED_XCPT)
6676 {
6677 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6678 rcStrict = VINF_SUCCESS;
6679 }
6680
6681 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6682 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6683 return rcStrict;
6684}
6685
6686
6687/**
6688 * VM-exit helper for CLTS.
6689 */
6690static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6691{
6692 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6693 AssertRCReturn(rc, rc);
6694
6695 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6696 AssertMsg( rcStrict == VINF_SUCCESS
6697 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6698
6699 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6700 if (rcStrict == VINF_IEM_RAISED_XCPT)
6701 {
6702 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6703 rcStrict = VINF_SUCCESS;
6704 }
6705
6706 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6707 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6708 return rcStrict;
6709}
6710
6711
6712/**
6713 * VM-exit helper for MOV from CRx (CRx read).
6714 */
6715static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6716{
6717 Assert(iCrReg < 16);
6718 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6719
6720 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6721 AssertRCReturn(rc, rc);
6722
6723 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6724 AssertMsg( rcStrict == VINF_SUCCESS
6725 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6726
6727 if (iGReg == X86_GREG_xSP)
6728 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6729 else
6730 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6731#ifdef VBOX_WITH_STATISTICS
6732 switch (iCrReg)
6733 {
6734 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6735 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6736 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6737 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6738 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6739 }
6740#endif
6741 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6742 return rcStrict;
6743}
6744
6745
6746/**
6747 * VM-exit helper for MOV to CRx (CRx write).
6748 */
6749static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6750{
6751 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6752
6753 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6754 AssertMsg( rcStrict == VINF_SUCCESS
6755 || rcStrict == VINF_IEM_RAISED_XCPT
6756 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6757
6758 switch (iCrReg)
6759 {
6760 case 0:
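            /* A CR0 write can toggle paging and thus IA-32e mode activity (EFER.LMA), so EFER
               and the VM-entry/exit controls are flagged as changed as well. */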
6761 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6762 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6763 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6764 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6765 break;
6766
6767 case 2:
6768 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6769            /* Nothing to do here; CR2 is not part of the VMCS. */
6770 break;
6771
6772 case 3:
6773 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6774 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6775 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6776 break;
6777
6778 case 4:
6779 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6780 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6781#ifndef IN_NEM_DARWIN
6782 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6783 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6784#else
6785 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6786#endif
6787 break;
6788
6789 case 8:
6790 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6791 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6792 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6793 break;
6794
6795 default:
6796 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6797 break;
6798 }
6799
6800 if (rcStrict == VINF_IEM_RAISED_XCPT)
6801 {
6802 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6803 rcStrict = VINF_SUCCESS;
6804 }
6805 return rcStrict;
6806}
6807
6808
6809/**
6810 * VM-exit exception handler for \#PF (Page-fault exception).
6811 *
6812 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6813 */
6814static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6815{
6816 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6817 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
6818
6819#ifndef IN_NEM_DARWIN
6820 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6821 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6822 { /* likely */ }
6823 else
6824#endif
6825 {
6826#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6827 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6828#endif
6829 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6830 if (!pVmxTransient->fVectoringDoublePF)
6831 {
6832 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6833 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6834 }
6835 else
6836 {
6837 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6838 Assert(!pVmxTransient->fIsNestedGuest);
6839 vmxHCSetPendingXcptDF(pVCpu);
6840 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6841 }
6842 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6843 return VINF_SUCCESS;
6844 }
6845
6846 Assert(!pVmxTransient->fIsNestedGuest);
6847
6848    /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
6849       of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6850 if (pVmxTransient->fVectoringPF)
6851 {
6852 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6853 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6854 }
6855
6856 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6857 AssertRCReturn(rc, rc);
6858
6859 Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pVCpu->cpum.GstCtx.cs.Sel,
6860 pVCpu->cpum.GstCtx.rip, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pVCpu->cpum.GstCtx.cr3));
6861
6862 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6863 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, &pVCpu->cpum.GstCtx, (RTGCPTR)pVmxTransient->uExitQual);
6864
6865 Log4Func(("#PF: rc=%Rrc\n", rc));
6866 if (rc == VINF_SUCCESS)
6867 {
6868 /*
6869         * This is typically a shadow page table sync or an MMIO instruction. But we may have
6870 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6871 */
6872 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6873 TRPMResetTrap(pVCpu);
6874 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6875 return rc;
6876 }
6877
6878 if (rc == VINF_EM_RAW_GUEST_TRAP)
6879 {
6880 if (!pVmxTransient->fVectoringDoublePF)
6881 {
6882 /* It's a guest page fault and needs to be reflected to the guest. */
6883 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6884 TRPMResetTrap(pVCpu);
6885 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6886 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6887 uGstErrorCode, pVmxTransient->uExitQual);
6888 }
6889 else
6890 {
6891 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6892 TRPMResetTrap(pVCpu);
6893 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6894 vmxHCSetPendingXcptDF(pVCpu);
6895 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6896 }
6897
6898 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6899 return VINF_SUCCESS;
6900 }
6901
6902 TRPMResetTrap(pVCpu);
6903 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6904 return rc;
6905}
6906
6907
6908/**
6909 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6910 *
6911 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6912 */
6913static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6914{
6915 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6916 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6917
6918 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR0>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6919 AssertRCReturn(rc, rc);
6920
6921 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6922 {
6923 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
6924 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6925
6926 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6927         *        provides the VM-exit instruction length. If this causes a problem later,
6928 * disassemble the instruction like it's done on AMD-V. */
6929 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6930 AssertRCReturn(rc2, rc2);
6931 return rc;
6932 }
6933
6934 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6935 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6936 return VINF_SUCCESS;
6937}
6938
6939
6940/**
6941 * VM-exit exception handler for \#BP (Breakpoint exception).
6942 *
6943 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6944 */
6945static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6946{
6947 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6948 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6949
6950 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6951 AssertRCReturn(rc, rc);
6952
6953 VBOXSTRICTRC rcStrict;
6954 if (!pVmxTransient->fIsNestedGuest)
6955 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
6956 else
6957 rcStrict = VINF_EM_RAW_GUEST_TRAP;
6958
6959 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6960 {
6961 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6962 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6963 rcStrict = VINF_SUCCESS;
6964 }
6965
6966 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
6967 return rcStrict;
6968}
6969
6970
6971/**
6972 * VM-exit exception handler for \#AC (Alignment-check exception).
6973 *
6974 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6975 */
6976static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6977{
6978 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6979
6980 /*
6981     * Detect #ACs caused by the host having enabled split-lock detection.
6982 * Emulate such instructions.
6983 */
6984#define VMX_HC_EXIT_XCPT_AC_INITIAL_REGS (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS)
6985 int rc = vmxHCImportGuestState<VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6986 AssertRCReturn(rc, rc);
6987 /** @todo detect split lock in cpu feature? */
6988 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
6989 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6990 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
6991 || CPUMGetGuestCPL(pVCpu) != 3
6992           /* 3. When EFLAGS.AC == 0 this can only be a split-lock case. */
6993 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
6994 {
6995 /*
6996 * Check for debug/trace events and import state accordingly.
6997 */
6998 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
6999 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7000 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
7001#ifndef IN_NEM_DARWIN
7002 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
7003#endif
7004 )
7005 {
7006 if (pVM->cCpus == 1)
7007 {
7008#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7009 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK,
7010 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7011#else
7012 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7013 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7014#endif
7015 AssertRCReturn(rc, rc);
7016 }
7017 }
7018 else
7019 {
7020 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7021 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7022 AssertRCReturn(rc, rc);
7023
7024 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
7025
7026 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
7027 {
7028 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
7029 if (rcStrict != VINF_SUCCESS)
7030 return rcStrict;
7031 }
7032 }
7033
7034 /*
7035 * Emulate the instruction.
7036 *
7037 * We have to ignore the LOCK prefix here as we must not retrigger the
7038 * detection on the host. This isn't all that satisfactory, though...
7039 */
7040 if (pVM->cCpus == 1)
7041 {
7042 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
7043 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7044
7045 /** @todo For SMP configs we should do a rendezvous here. */
7046 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
7047 if (rcStrict == VINF_SUCCESS)
7048#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7049 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
7050 HM_CHANGED_GUEST_RIP
7051 | HM_CHANGED_GUEST_RFLAGS
7052 | HM_CHANGED_GUEST_GPRS_MASK
7053 | HM_CHANGED_GUEST_CS
7054 | HM_CHANGED_GUEST_SS);
7055#else
7056 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7057#endif
7058 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7059 {
7060 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7061 rcStrict = VINF_SUCCESS;
7062 }
7063 return rcStrict;
7064 }
7065 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
7066 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7067 return VINF_EM_EMULATE_SPLIT_LOCK;
7068 }
7069
7070 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
7071 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7072 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
7073
7074 /* Re-inject it. We'll detect any nesting before getting here. */
7075 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7076 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7077 return VINF_SUCCESS;
7078}
7079
7080
7081/**
7082 * VM-exit exception handler for \#DB (Debug exception).
7083 *
7084 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7085 */
7086static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7087{
7088 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7089 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
7090
7091 /*
7092 * Get the DR6-like values from the Exit qualification and pass it to DBGF for processing.
7093 */
7094 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
7095
7096 /* Refer to Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
7097 uint64_t const uDR6 = X86_DR6_INIT_VAL
7098 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
7099 | X86_DR6_BD | X86_DR6_BS));
7100 Log6Func(("uDR6=%#RX64 uExitQual=%#RX64\n", uDR6, pVmxTransient->uExitQual));
7101
7102 int rc;
7103 if (!pVmxTransient->fIsNestedGuest)
7104 {
7105 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx, uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7106
7107 /*
7108 * Prevents stepping twice over the same instruction when the guest is stepping using
7109 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
7110 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
7111 */
7112 if ( rc == VINF_EM_DBG_STEPPED
7113 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
7114 {
7115 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7116 rc = VINF_EM_RAW_GUEST_TRAP;
7117 }
7118 }
7119 else
7120 rc = VINF_EM_RAW_GUEST_TRAP;
7121 Log6Func(("rc=%Rrc\n", rc));
7122 if (rc == VINF_EM_RAW_GUEST_TRAP)
7123 {
7124 /*
7125 * The exception was for the guest. Update DR6, DR7.GD and
7126 * IA32_DEBUGCTL.LBR before forwarding it.
7127 * See Intel spec. 27.1 "Architectural State before a VM-Exit"
7128 * and @sdmv3{077,622,17.2.3,Debug Status Register (DR6)}.
7129 */
7130#ifndef IN_NEM_DARWIN
7131 VMMRZCallRing3Disable(pVCpu);
7132 HM_DISABLE_PREEMPT(pVCpu);
7133
7134 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
7135 pVCpu->cpum.GstCtx.dr[6] |= uDR6;
7136 if (CPUMIsGuestDebugStateActive(pVCpu))
7137 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
7138
7139 HM_RESTORE_PREEMPT();
7140 VMMRZCallRing3Enable(pVCpu);
7141#else
7142 /** @todo */
7143#endif
7144
7145 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7146 AssertRCReturn(rc, rc);
7147
7148 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
7149 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_GD;
7150
7151 /* Paranoia. */
7152 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
7153 pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_RA1_MASK;
7154
7155 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pVCpu->cpum.GstCtx.dr[7]);
7156 AssertRC(rc);
7157
7158 /*
7159 * Raise #DB in the guest.
7160 *
7161 * It is important to reflect exactly what the VM-exit gave us (preserving the
7162 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
7163 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
7164 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
7165 *
7166 * Intel re-documented ICEBP/INT1 in May 2018; it was previously documented only as
7167 * part of the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7168 */
7169 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7170 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7171 return VINF_SUCCESS;
7172 }
7173
7174 /*
7175 * Not a guest trap, must be a hypervisor related debug event then.
7176 * Update DR6 in case someone is interested in it.
7177 */
7178 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
7179 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
7180 CPUMSetHyperDR6(pVCpu, uDR6);
7181
7182 return rc;
7183}
7184
7185
7186/**
7187 * Hacks its way around the lovely mesa driver's backdoor accesses.
7188 *
7189 * @sa hmR0SvmHandleMesaDrvGp.
7190 */
7191static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7192{
7193 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
7194 RT_NOREF(pCtx);
7195
7196 /* For now we'll just skip the instruction. */
7197 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7198}
7199
7200
7201/**
7202 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
7203 * backdoor logging w/o checking what it is running inside.
7204 *
7205 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
7206 * backdoor port and magic numbers loaded in registers.
7207 *
7208 * @returns true if it is, false if it isn't.
7209 * @sa hmR0SvmIsMesaDrvGp.
7210 */
7211DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7212{
7213 /* 0xed: IN eAX,dx */
7214 uint8_t abInstr[1];
7215 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
7216 return false;
7217
7218 /* Check that it is #GP(0). */
7219 if (pVmxTransient->uExitIntErrorCode != 0)
7220 return false;
7221
7222 /* Check magic and port. */
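    /* The magic 0x564d5868 is ASCII 'VMXh' and port 0x5658 is 'VX', i.e. the VMware
       backdoor interface that the Mesa driver talks to. */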
7223 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
7224 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
7225 if (pCtx->rax != UINT32_C(0x564d5868))
7226 return false;
7227 if (pCtx->dx != UINT32_C(0x5658))
7228 return false;
7229
7230 /* Flat ring-3 CS. */
7231 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
7232 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
7233 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
7234 if (pCtx->cs.Attr.n.u2Dpl != 3)
7235 return false;
7236 if (pCtx->cs.u64Base != 0)
7237 return false;
7238
7239 /* Check opcode. */
7240 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
7241 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
7242 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
7243 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
7244 if (RT_FAILURE(rc))
7245 return false;
7246 if (abInstr[0] != 0xed)
7247 return false;
7248
7249 return true;
7250}
7251
7252
7253/**
7254 * VM-exit exception handler for \#GP (General-protection exception).
7255 *
7256 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7257 */
7258static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7259{
7260 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7261 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
7262
7263 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7264 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7265#ifndef IN_NEM_DARWIN
7266 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
7267 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
7268 { /* likely */ }
7269 else
7270#endif
7271 {
7272#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7273# ifndef IN_NEM_DARWIN
7274 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7275# else
7276 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7277# endif
7278#endif
7279 /*
7280 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
7281 * executing a nested-guest, reflect #GP to the guest or nested-guest.
7282 */
7283 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7284 AssertRCReturn(rc, rc);
7285 Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
7286 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
7287
7288 if ( pVmxTransient->fIsNestedGuest
7289 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
7290 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
7291 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7292 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7293 else
7294 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
7295 return rc;
7296 }
7297
7298#ifndef IN_NEM_DARWIN
7299 Assert(CPUMIsGuestInRealModeEx(pCtx));
7300 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
7301 Assert(!pVmxTransient->fIsNestedGuest);
7302
7303 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7304 AssertRCReturn(rc, rc);
7305
7306 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7307 if (rcStrict == VINF_SUCCESS)
7308 {
7309 if (!CPUMIsGuestInRealModeEx(pCtx))
7310 {
7311 /*
7312 * The guest is no longer in real-mode, check if we can continue executing the
7313 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
7314 */
7315 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
7316 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
7317 {
7318 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
7319 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7320 }
7321 else
7322 {
7323 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
7324 rcStrict = VINF_EM_RESCHEDULE;
7325 }
7326 }
7327 else
7328 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7329 }
7330 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7331 {
7332 rcStrict = VINF_SUCCESS;
7333 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7334 }
7335 return VBOXSTRICTRC_VAL(rcStrict);
7336#endif
7337}
7338
7339
7340/**
7341 * VM-exit exception handler for \#DE (Divide Error).
7342 *
7343 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7344 */
7345static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7346{
7347 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7348 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
7349
7350 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7351 AssertRCReturn(rc, rc);
7352
7353 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7354 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
7355 {
7356 uint8_t cbInstr = 0;
7357 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
7358 if (rc2 == VINF_SUCCESS)
7359 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
7360 else if (rc2 == VERR_NOT_FOUND)
7361 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
7362 else
7363 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
7364 }
7365 else
7366 rcStrict = VINF_SUCCESS; /* Do nothing. */
7367
7368 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
7369 if (RT_FAILURE(rcStrict))
7370 {
7371 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7372 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7373 rcStrict = VINF_SUCCESS;
7374 }
7375
7376 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
7377 return VBOXSTRICTRC_VAL(rcStrict);
7378}
7379
7380
7381/**
7382 * VM-exit exception handler wrapper for all other exceptions that are not handled
7383 * by a specific handler.
7384 *
7385 * This simply re-injects the exception back into the VM without any special
7386 * processing.
7387 *
7388 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7389 */
7390static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7391{
7392 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7393
7394#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7395# ifndef IN_NEM_DARWIN
7396 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7397 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
7398 ("uVector=%#x u32XcptBitmap=%#RX32\n",
7399 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
7400 NOREF(pVmcsInfo);
7401# endif
7402#endif
7403
7404 /*
7405 * Re-inject the exception into the guest. This cannot be a double-fault condition which
7406 * would have been handled while checking exits due to event delivery.
7407 */
7408 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7409
7410#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7411 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7412 AssertRCReturn(rc, rc);
7413 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7414#endif
7415
7416#ifdef VBOX_WITH_STATISTICS
7417 switch (uVector)
7418 {
7419 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
7420 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
7421 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
7422 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7423 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
7424 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
7425 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7426 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
7427 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
7428 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
7429 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
7430 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
7431 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
7432 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
7433 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
7434 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
7435 default:
7436 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
7437 break;
7438 }
7439#endif
7440
7441 /* We should never call this function for a page-fault, we'd need to pass on the fault address below otherwise. */
7442 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
7443 NOREF(uVector);
7444
7445 /* Re-inject the original exception into the guest. */
7446 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7447 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7448 return VINF_SUCCESS;
7449}
7450
7451
7452/**
7453 * VM-exit exception handler for all exceptions (except NMIs!).
7454 *
7455 * @remarks This may be called for both guests and nested-guests. Take care to not
7456 * make assumptions and avoid doing anything that is not relevant when
7457 * executing a nested-guest (e.g., Mesa driver hacks).
7458 */
7459static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7460{
7461 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
7462
7463 /*
7464 * If this VM-exit occurred while delivering an event through the guest IDT, take
7465 * action based on the return code and additional hints (e.g. for page-faults)
7466 * that will be updated in the VMX transient structure.
7467 */
7468 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
7469 if (rcStrict == VINF_SUCCESS)
7470 {
7471 /*
7472 * If an exception caused a VM-exit due to delivery of an event, the original
7473 * event may have to be re-injected into the guest. We shall reinject it and
7474 * continue guest execution. However, page-fault is a complicated case and
7475 * needs additional processing done in vmxHCExitXcptPF().
7476 */
7477 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7478 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7479 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
7480 || uVector == X86_XCPT_PF)
7481 {
7482 switch (uVector)
7483 {
7484 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7485 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7486 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7487 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7488 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7489 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7490 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7491 default:
7492 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7493 }
7494 }
7495 /* else: inject pending event before resuming guest execution. */
7496 }
7497 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7498 {
7499 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7500 rcStrict = VINF_SUCCESS;
7501 }
7502
7503 return rcStrict;
7504}
7505/** @} */
7506
7507
7508/** @name VM-exit handlers.
7509 * @{
7510 */
7511/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7512/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7513/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7514
7515/**
7516 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7517 */
7518HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7519{
7520 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7521 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7522
7523#ifndef IN_NEM_DARWIN
7524 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7525 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7526 return VINF_SUCCESS;
7527 return VINF_EM_RAW_INTERRUPT;
7528#else
7529 return VINF_SUCCESS;
7530#endif
7531}
7532
7533
7534/**
7535 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7536 * VM-exit.
7537 */
7538HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7539{
7540 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7541 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7542
7543 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
7544
7545 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7546 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7547 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7548
7549 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7550 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7551 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7552 NOREF(pVmcsInfo);
7553
7554 VBOXSTRICTRC rcStrict;
7555 switch (uExitIntType)
7556 {
7557#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7558 /*
7559 * Host physical NMIs:
7560 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7561 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7562 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7563 *
7564 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7565 * See Intel spec. 27.5.5 "Updating Non-Register State".
7566 */
7567 case VMX_EXIT_INT_INFO_TYPE_NMI:
7568 {
7569 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7570 break;
7571 }
7572#endif
7573
7574 /*
7575 * Privileged software exceptions (#DB from ICEBP),
7576 * Software exceptions (#BP and #OF),
7577 * Hardware exceptions:
7578 * Process the required exceptions and resume guest execution if possible.
7579 */
7580 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7581 Assert(uVector == X86_XCPT_DB);
7582 RT_FALL_THRU();
7583 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7584 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7585 RT_FALL_THRU();
7586 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7587 {
7588 NOREF(uVector);
7589 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
7590 | HMVMX_READ_EXIT_INSTR_LEN
7591 | HMVMX_READ_IDT_VECTORING_INFO
7592 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
7593 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7594 break;
7595 }
7596
7597 default:
7598 {
7599 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7600 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7601 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7602 break;
7603 }
7604 }
7605
7606 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7607 return rcStrict;
7608}
7609
7610
7611/**
7612 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7613 */
7614HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7615{
7616 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7617
7618 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7619 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7620 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7621
7622 /* Evaluate and deliver pending events and resume guest execution. */
7623 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7624 return VINF_SUCCESS;
7625}
7626
7627
7628/**
7629 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7630 */
7631HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7632{
7633 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7634
7635 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7636 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7637 {
7638 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7639 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7640 }
7641
7642 Assert(!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx));
7643
7644 /*
7645 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7646 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7647 */
7648 uint32_t fIntrState;
7649 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7650 AssertRC(rc);
7651 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7652 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7653 {
7654 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
7655
7656 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7657 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7658 AssertRC(rc);
7659 }
7660
7661 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7662 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7663
7664 /* Evaluate and deliver pending events and resume guest execution. */
7665 return VINF_SUCCESS;
7666}
7667
7668
7669/**
7670 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7671 */
7672HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7673{
7674 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7675 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7676}
7677
7678
7679/**
7680 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7681 */
7682HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7683{
7684 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7685 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7686}
7687
7688
7689/**
7690 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7691 */
7692HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7693{
7694 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7695
7696 /*
7697 * Get the state we need and update the exit history entry.
7698 */
7699 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7700 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7701 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7702 AssertRCReturn(rc, rc);
7703
7704 VBOXSTRICTRC rcStrict;
7705 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7706 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7707 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7708 if (!pExitRec)
7709 {
7710 /*
7711 * Regular CPUID instruction execution.
7712 */
7713 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7714 if (rcStrict == VINF_SUCCESS)
7715 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7716 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7717 {
7718 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7719 rcStrict = VINF_SUCCESS;
7720 }
7721 }
7722 else
7723 {
7724 /*
7725 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7726 */
7727 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7728 IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7729 AssertRCReturn(rc2, rc2);
7730
7731 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7732 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7733
7734 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7735 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7736
7737 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7738 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7739 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7740 }
7741 return rcStrict;
7742}
7743
7744
7745/**
7746 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7747 */
7748HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7749{
7750 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7751
7752 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7753 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7754 AssertRCReturn(rc, rc);
7755
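    /* GETSEC causes a VM-exit only when CR4.SMXE is set; with CR4.SMXE clear it raises #UD
       in the guest without exiting, so reaching this point with SMXE clear is unexpected. */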
7756 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7757 return VINF_EM_RAW_EMULATE_INSTR;
7758
7759 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7760 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7761}
7762
7763
7764/**
7765 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7766 */
7767HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7768{
7769 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7770
7771 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7772 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7773 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7774 AssertRCReturn(rc, rc);
7775
7776 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7777 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7778 {
7779 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7780 we must reset offsetting on VM-entry. See @bugref{6634}. */
7781 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7782 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7783 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7784 }
7785 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7786 {
7787 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7788 rcStrict = VINF_SUCCESS;
7789 }
7790 return rcStrict;
7791}
7792
7793
7794/**
7795 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7796 */
7797HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7798{
7799 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7800
7801 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7802 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7803 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX>(pVCpu, pVmcsInfo, __FUNCTION__);
7804 AssertRCReturn(rc, rc);
7805
7806 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7807 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7808 {
7809 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7810 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7811 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7812 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7813 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7814 }
7815 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7816 {
7817 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7818 rcStrict = VINF_SUCCESS;
7819 }
7820 return rcStrict;
7821}
7822
7823
7824/**
7825 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7826 */
7827HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7828{
7829 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7830
7831 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7832 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7833 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7834 AssertRCReturn(rc, rc);
7835
7836 VBOXSTRICTRC rcStrict = IEMExecDecodedRdpmc(pVCpu, pVmxTransient->cbExitInstr);
7837 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7838 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7839 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7840 {
7841 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7842 rcStrict = VINF_SUCCESS;
7843 }
7844 return rcStrict;
7845}
7846
7847
7848/**
7849 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7850 */
7851HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7852{
7853 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7854
7855 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7856 if (EMAreHypercallInstructionsEnabled(pVCpu))
7857 {
7858 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7859 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RIP
7860 | CPUMCTX_EXTRN_RFLAGS
7861 | CPUMCTX_EXTRN_CR0
7862 | CPUMCTX_EXTRN_SS
7863 | CPUMCTX_EXTRN_CS
7864 | CPUMCTX_EXTRN_EFER>(pVCpu, pVmcsInfo, __FUNCTION__);
7865 AssertRCReturn(rc, rc);
7866
7867 /* Perform the hypercall. */
7868 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7869 if (rcStrict == VINF_SUCCESS)
7870 {
7871 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7872 AssertRCReturn(rc, rc);
7873 }
7874 else
7875 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7876 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7877 || RT_FAILURE(rcStrict));
7878
7879 /* If the hypercall changes anything other than guest's general-purpose registers,
7880 we would need to reload the guest changed bits here before VM-entry. */
7881 }
7882 else
7883 Log4Func(("Hypercalls not enabled\n"));
7884
7885 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7886 if (RT_FAILURE(rcStrict))
7887 {
7888 vmxHCSetPendingXcptUD(pVCpu);
7889 rcStrict = VINF_SUCCESS;
7890 }
7891
7892 return rcStrict;
7893}
7894
7895
7896/**
7897 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7898 */
7899HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7900{
7901 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7902#ifndef IN_NEM_DARWIN
7903 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7904#endif
7905
7906 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7907 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
7908 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7909 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7910 AssertRCReturn(rc, rc);
7911
7912 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7913
7914 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7915 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7916 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7917 {
7918 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7919 rcStrict = VINF_SUCCESS;
7920 }
7921 else
7922 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7923 VBOXSTRICTRC_VAL(rcStrict)));
7924 return rcStrict;
7925}
7926
7927
7928/**
7929 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7930 */
7931HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7932{
7933 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7934
7935 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7936 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7937 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS>(pVCpu, pVmcsInfo, __FUNCTION__);
7938 AssertRCReturn(rc, rc);
7939
7940 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7941 if (rcStrict == VINF_SUCCESS)
7942 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7943 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7944 {
7945 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7946 rcStrict = VINF_SUCCESS;
7947 }
7948
7949 return rcStrict;
7950}
7951
7952
7953/**
7954 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7955 */
7956HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7957{
7958 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7959
7960 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7961 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7962 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7963 AssertRCReturn(rc, rc);
7964
7965 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
7966 if (RT_SUCCESS(rcStrict))
7967 {
7968 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7969 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7970 rcStrict = VINF_SUCCESS;
7971 }
7972
7973 return rcStrict;
7974}
7975
7976
7977/**
7978 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7979 * VM-exit.
7980 */
7981HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7982{
7983 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7984 return VINF_EM_RESET;
7985}
7986
7987
7988/**
7989 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7990 */
7991HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7992{
7993 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7994
7995 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7996 AssertRCReturn(rc, rc);
7997
7998 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
7999 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
8000 rc = VINF_SUCCESS;
8001 else
8002 rc = VINF_EM_HALT;
8003
8004 if (rc != VINF_SUCCESS)
8005 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
8006 return rc;
8007}
8008
8009
8010#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8011/**
8012 * VM-exit handler for instructions that result in a \#UD exception delivered to
8013 * the guest.
8014 */
8015HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8016{
8017 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8018 vmxHCSetPendingXcptUD(pVCpu);
8019 return VINF_SUCCESS;
8020}
8021#endif
8022
8023
8024/**
8025 * VM-exit handler for expiry of the VMX-preemption timer.
8026 */
8027HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8028{
8029 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8030
8031 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
8032 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8033 Log12(("vmxHCExitPreemptTimer:\n"));
8034
8035 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
8036 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8037 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
8038 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
8039 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
8040}
8041
8042
8043/**
8044 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
8045 */
8046HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8047{
8048 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8049
8050 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8051 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8052 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
8053 AssertRCReturn(rc, rc);
8054
8055 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
8056 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
8057 : HM_CHANGED_RAISED_XCPT_MASK);
8058
8059#ifndef IN_NEM_DARWIN
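    /* The guest may have changed XCR0 above; re-evaluate whether it now differs from the host
       value so the VM-entry code knows whether XCR0 must be swapped around guest execution. */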
8060 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8061 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
8062 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
8063 {
8064 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
8065 hmR0VmxUpdateStartVmFunction(pVCpu);
8066 }
8067#endif
8068
8069 return rcStrict;
8070}
8071
8072
8073/**
8074 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
8075 */
8076HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8077{
8078 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8079
8080 /** @todo Enable the new code after finding a reliable guest test-case. */
8081#if 1
8082 return VERR_EM_INTERPRETER;
8083#else
8084 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8085 | HMVMX_READ_EXIT_INSTR_INFO
8086 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8087 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8088 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8089 AssertRCReturn(rc, rc);
8090
8091 /* Paranoia. Ensure this has a memory operand. */
8092 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
8093
8094 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
8095 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
8096 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
8097 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
8098
8099 RTGCPTR GCPtrDesc;
8100 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
8101
8102 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
8103 GCPtrDesc, uType);
8104 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8105 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8106 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8107 {
8108 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8109 rcStrict = VINF_SUCCESS;
8110 }
8111 return rcStrict;
8112#endif
8113}
8114
8115
8116/**
8117 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
8118 * VM-exit.
8119 */
8120HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8121{
8122 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8123 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8124 AssertRCReturn(rc, rc);
8125
8126 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
8127 if (RT_FAILURE(rc))
8128 return rc;
8129
8130 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
8131 NOREF(uInvalidReason);
8132
8133#ifdef VBOX_STRICT
8134 uint32_t fIntrState;
8135 uint64_t u64Val;
8136 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_INFO
8137 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8138 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
8139
8140 Log4(("uInvalidReason %u\n", uInvalidReason));
8141 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
8142 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
8143 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
8144
8145 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
8146 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
8147 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
8148 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
8149 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
8150 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
8151 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
8152 Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
8153 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
8154 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
8155 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
8156 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8157# ifndef IN_NEM_DARWIN
8158 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
8159 {
8160 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
8161 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
8162 }
8163
8164 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
8165# endif
8166#endif
8167
8168 return VERR_VMX_INVALID_GUEST_STATE;
8169}
8170
8171/**
8172 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
8173 */
8174HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8175{
8176 /*
8177 * Cumulative notes of all recognized but unexpected VM-exits.
8178 *
8179 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
8180 * nested-paging is used.
8181 *
8182 * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
8183 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
8184 * this function (and thereby stopping VM execution) for handling such instructions.
8185 *
8186 *
8187 * VMX_EXIT_INIT_SIGNAL:
8188 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
8189 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these
8190 * VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
8191 *
8192 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
8193 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
8194 * See Intel spec. "23.8 Restrictions on VMX operation".
8195 *
8196 * VMX_EXIT_SIPI:
8197 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
8198 * activity state is used. We don't make use of it as our guests don't have direct
8199 * access to the host local APIC.
8200 *
8201 * See Intel spec. 25.3 "Other Causes of VM-exits".
8202 *
8203 * VMX_EXIT_IO_SMI:
8204 * VMX_EXIT_SMI:
8205 * This can only happen if we support dual-monitor treatment of SMI, which can be
8206 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
8207 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
8208 * VMX root mode or receive an SMI. If we get here, something funny is going on.
8209 *
8210 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
8211 * See Intel spec. 25.3 "Other Causes of VM-Exits"
8212 *
8213 * VMX_EXIT_ERR_MSR_LOAD:
8214 * Failures while loading MSRs that are part of the VM-entry MSR-load area are
8215 * unexpected and typically indicate a bug in the hypervisor code. We thus cannot
8216 * resume execution.
8217 *
8218 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
8219 *
8220 * VMX_EXIT_ERR_MACHINE_CHECK:
8221 * Machine-check exceptions indicate a fatal/unrecoverable hardware condition,
8222 * including but not limited to system bus, ECC, parity, cache and TLB errors. A
8223 * #MC is an abort-class exception. We thus cannot assume a reasonable chance of
8224 * continuing any sort of execution and we bail.
8225 *
8226 * See Intel spec. 15.1 "Machine-check Architecture".
8227 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
8228 *
8229 * VMX_EXIT_PML_FULL:
8230 * VMX_EXIT_VIRTUALIZED_EOI:
8231 * VMX_EXIT_APIC_WRITE:
8232 * We do not currently support any of these features and thus they are all unexpected
8233 * VM-exits.
8234 *
8235 * VMX_EXIT_GDTR_IDTR_ACCESS:
8236 * VMX_EXIT_LDTR_TR_ACCESS:
8237 * VMX_EXIT_RDRAND:
8238 * VMX_EXIT_RSM:
8239 * VMX_EXIT_VMFUNC:
8240 * VMX_EXIT_ENCLS:
8241 * VMX_EXIT_RDSEED:
8242 * VMX_EXIT_XSAVES:
8243 * VMX_EXIT_XRSTORS:
8244 * VMX_EXIT_UMWAIT:
8245 * VMX_EXIT_TPAUSE:
8246 * VMX_EXIT_LOADIWKEY:
8247 * These VM-exits are -not- caused unconditionally by execution of the corresponding
8248 * instruction. Any VM-exit for these instructions indicates a hardware problem,
8249 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
8250 *
8251 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
8252 */
8253 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8254 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
8255 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8256}
8257
8258
8259/**
8260 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
8261 */
8262HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8263{
8264 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8265
8266 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8267
8268 /** @todo Optimize this: We currently drag in the whole MSR state
8269 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only fetch the
8270 * MSRs required. That would require changes to IEM and possibly CPUM too.
8271 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8272 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8273 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8274 int rc;
8275 switch (idMsr)
8276 {
8277 default:
8278 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8279 __FUNCTION__);
8280 AssertRCReturn(rc, rc);
8281 break;
8282 case MSR_K8_FS_BASE:
8283 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8284 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8285 AssertRCReturn(rc, rc);
8286 break;
8287 case MSR_K8_GS_BASE:
8288 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8289 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8290 AssertRCReturn(rc, rc);
8291 break;
8292 }
8293
8294 Log4Func(("ecx=%#RX32\n", idMsr));
8295
8296#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8297 Assert(!pVmxTransient->fIsNestedGuest);
8298 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
8299 {
8300 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
8301 && idMsr != MSR_K6_EFER)
8302 {
8303 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
8304 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8305 }
8306 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8307 {
8308 Assert(pVmcsInfo->pvMsrBitmap);
8309 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8310 if (fMsrpm & VMXMSRPM_ALLOW_RD)
8311 {
8312 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
8313 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8314 }
8315 }
8316 }
8317#endif
8318
8319 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
8320 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
8321 if (rcStrict == VINF_SUCCESS)
8322 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8323 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8324 {
8325 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8326 rcStrict = VINF_SUCCESS;
8327 }
8328 else
8329 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
8330 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8331
8332 return rcStrict;
8333}
8334
8335
8336/**
8337 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
8338 */
8339HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8340{
8341 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8342
8343 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8344
8345 /*
8346 * The FS and GS base MSRs are not part of the all-MSRs mask used below.
8347 * Although we don't need to fetch the base itself (it will be overwritten shortly),
8348 * loading the guest state would also load the entire segment register, including
8349 * the limit and attributes, and thus we need to import them here.
8350 */
8351 /** @todo Optimize this: We currently drag in the whole MSR state
8352 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only fetch the
8353 * MSRs required. That would require changes to IEM and possibly CPUM too.
8354 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8355 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8356 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8357 int rc;
8358 switch (idMsr)
8359 {
8360 default:
8361 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8362 __FUNCTION__);
8363 AssertRCReturn(rc, rc);
8364 break;
8365
8366 case MSR_K8_FS_BASE:
8367 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8368 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8369 AssertRCReturn(rc, rc);
8370 break;
8371 case MSR_K8_GS_BASE:
8372 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8373 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8374 AssertRCReturn(rc, rc);
8375 break;
8376 }
8377 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
8378
8379 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
8380 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
8381
8382 if (rcStrict == VINF_SUCCESS)
8383 {
8384 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8385
8386 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
8387 if ( idMsr == MSR_IA32_APICBASE
8388 || ( idMsr >= MSR_IA32_X2APIC_START
8389 && idMsr <= MSR_IA32_X2APIC_END))
8390 {
8391 /*
8392 * We've already saved the APIC related guest-state (TPR) in post-run phase.
8393 * When full APIC register virtualization is implemented we'll have to make
8394 * sure APIC state is saved from the VMCS before IEM changes it.
8395 */
8396 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
8397 }
8398 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
8399 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8400 else if (idMsr == MSR_K6_EFER)
8401 {
8402 /*
8403 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
8404 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
8405 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
8406 */
8407 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
8408 }
8409
8410 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
8411 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
8412 {
8413 switch (idMsr)
8414 {
8415 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
8416 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
8417 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
8418 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
8419 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
8420 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
8421 default:
8422 {
8423#ifndef IN_NEM_DARWIN
8424 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8425 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
8426 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8427 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
8428#else
8429 AssertMsgFailed(("TODO\n"));
8430#endif
8431 break;
8432 }
8433 }
8434 }
8435#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8436 else
8437 {
8438 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
8439 switch (idMsr)
8440 {
8441 case MSR_IA32_SYSENTER_CS:
8442 case MSR_IA32_SYSENTER_EIP:
8443 case MSR_IA32_SYSENTER_ESP:
8444 case MSR_K8_FS_BASE:
8445 case MSR_K8_GS_BASE:
8446 {
8447 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
8448 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8449 }
8450
8451 /* Writes to MSRs in the auto-load/store area or to lazily swapped MSRs shouldn't cause VM-exits when MSR-bitmaps are used. */
8452 default:
8453 {
8454 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8455 {
8456 /* EFER MSR writes are always intercepted. */
8457 if (idMsr != MSR_K6_EFER)
8458 {
8459 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
8460 idMsr));
8461 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8462 }
8463 }
8464
8465 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8466 {
8467 Assert(pVmcsInfo->pvMsrBitmap);
8468 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8469 if (fMsrpm & VMXMSRPM_ALLOW_WR)
8470 {
8471 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
8472 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8473 }
8474 }
8475 break;
8476 }
8477 }
8478 }
8479#endif /* VBOX_STRICT */
8480 }
8481 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8482 {
8483 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8484 rcStrict = VINF_SUCCESS;
8485 }
8486 else
8487 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
8488 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8489
8490 return rcStrict;
8491}
8492
8493
8494/**
8495 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8496 */
8497HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8498{
8499 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8500
8501 /** @todo The guest has likely hit a contended spinlock. We might want to
8502     *        poke or schedule a different guest VCPU. */
8503 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8504 if (RT_SUCCESS(rc))
8505 return VINF_EM_RAW_INTERRUPT;
8506
8507 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8508 return rc;
8509}
8510
8511
8512/**
8513 * VM-exit handler for when the TPR value is lowered below the specified
8514 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8515 */
8516HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8517{
8518 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8519 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8520
8521 /*
8522 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8523 * We'll re-evaluate pending interrupts and inject them before the next VM
8524 * entry so we can just continue execution here.
8525 */
8526 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8527 return VINF_SUCCESS;
8528}
8529
8530
8531/**
8532 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8533 * VM-exit.
8534 *
8535 * @retval VINF_SUCCESS when guest execution can continue.
8536 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8537 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8538 * incompatible guest state for VMX execution (real-on-v86 case).
8539 */
8540HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8541{
8542 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8543 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8544
8545 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8546 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8547 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8548
8549 VBOXSTRICTRC rcStrict;
8550 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8551 uint64_t const uExitQual = pVmxTransient->uExitQual;
8552 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
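    /*
     * The exit qualification for CR accesses gives the control register number in bits 3:0,
     * the access type in bits 5:4 (MOV to CR, MOV from CR, CLTS or LMSW) and, for MOV, the
     * general-purpose register involved in bits 11:8. See Intel spec. "Exit Qualification
     * for Control-Register Accesses".
     */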
8553 switch (uAccessType)
8554 {
8555 /*
8556 * MOV to CRx.
8557 */
8558 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8559 {
8560 /*
8561 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8562 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
8563 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8564 * PAE PDPTEs as well.
8565 */
8566 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8567 AssertRCReturn(rc, rc);
8568
8569 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8570#ifndef IN_NEM_DARWIN
8571 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8572#endif
8573 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8574 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8575
8576 /*
8577             * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8578 * - When nested paging isn't used.
8579 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8580 * - We are executing in the VM debug loop.
8581 */
8582#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8583# ifndef IN_NEM_DARWIN
8584 Assert( iCrReg != 3
8585 || !VM_IS_VMX_NESTED_PAGING(pVM)
8586 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8587 || pVCpu->hmr0.s.fUsingDebugLoop);
8588# else
8589 Assert( iCrReg != 3
8590 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8591# endif
8592#endif
8593
8594 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8595 Assert( iCrReg != 8
8596 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8597
8598 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8599 AssertMsg( rcStrict == VINF_SUCCESS
8600 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8601
8602#ifndef IN_NEM_DARWIN
8603 /*
8604 * This is a kludge for handling switches back to real mode when we try to use
8605 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8606 * deal with special selector values, so we have to return to ring-3 and run
8607 * there till the selector values are V86 mode compatible.
8608 *
8609 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8610 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8611 * this function.
8612 */
8613 if ( iCrReg == 0
8614 && rcStrict == VINF_SUCCESS
8615 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8616 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8617 && (uOldCr0 & X86_CR0_PE)
8618 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8619 {
8620 /** @todo Check selectors rather than returning all the time. */
8621 Assert(!pVmxTransient->fIsNestedGuest);
8622 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8623 rcStrict = VINF_EM_RESCHEDULE_REM;
8624 }
8625#endif
8626
8627 break;
8628 }
8629
8630 /*
8631 * MOV from CRx.
8632 */
8633 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8634 {
8635 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8636 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8637
8638 /*
8639             * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8640 * - When nested paging isn't used.
8641 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8642 * - We are executing in the VM debug loop.
8643 */
8644#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8645# ifndef IN_NEM_DARWIN
8646 Assert( iCrReg != 3
8647 || !VM_IS_VMX_NESTED_PAGING(pVM)
8648 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8649 || pVCpu->hmr0.s.fLeaveDone);
8650# else
8651 Assert( iCrReg != 3
8652 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8653# endif
8654#endif
8655
8656 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8657 Assert( iCrReg != 8
8658 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8659
8660 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8661 break;
8662 }
8663
8664 /*
8665 * CLTS (Clear Task-Switch Flag in CR0).
8666 */
8667 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8668 {
8669 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8670 break;
8671 }
8672
8673 /*
8674 * LMSW (Load Machine-Status Word into CR0).
8675 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8676 */
8677 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8678 {
8679 RTGCPTR GCPtrEffDst;
8680 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8681 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8682 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8683 if (fMemOperand)
8684 {
8685 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
8686 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8687 }
8688 else
8689 GCPtrEffDst = NIL_RTGCPTR;
8690 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8691 break;
8692 }
8693
8694 default:
8695 {
8696 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8697 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8698 }
8699 }
8700
8701 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8702 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8703 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8704
8705 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8706 NOREF(pVM);
8707 return rcStrict;
8708}
8709
8710
8711/**
8712 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8713 * VM-exit.
8714 */
8715HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8716{
8717 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8718 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8719
8720 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8721 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8722 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8723 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8724#define VMX_HC_EXIT_IO_INSTR_INITIAL_REGS (IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER)
8725 /* EFER MSR also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8726 int rc = vmxHCImportGuestState<VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8727 AssertRCReturn(rc, rc);
8728
8729 /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
8730 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8731 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8732 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8733 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8734 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8735 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8736 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8737
8738 /*
8739 * Update exit history to see if this exit can be optimized.
8740 */
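    /* Only consult the exit history when neither the guest (TF) nor the debugger is
       single-stepping, presumably so each instruction still gets its own VM-exit. */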
8741 VBOXSTRICTRC rcStrict;
8742 PCEMEXITREC pExitRec = NULL;
8743 if ( !fGstStepping
8744 && !fDbgStepping)
8745 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8746 !fIOString
8747 ? !fIOWrite
8748 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8749 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8750 : !fIOWrite
8751 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8752 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8753 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8754 if (!pExitRec)
8755 {
8756 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8757 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
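        /* The tables are indexed by the exit-qualification size field: 0 = 1 byte, 1 = 2 bytes,
           3 = 4 bytes; 2 is not a valid encoding (see the AssertReturn above), hence the 0 entries. */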
8758
8759 uint32_t const cbValue = s_aIOSizes[uIOSize];
8760 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8761 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8762 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8763 if (fIOString)
8764 {
8765 /*
8766 * INS/OUTS - I/O String instruction.
8767 *
8768 * Use instruction-information if available, otherwise fall back on
8769 * interpreting the instruction.
8770 */
8771 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8772 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
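            /* IA32_VMX_BASIC reports whether the CPU provides VM-exit instruction-information
               for INS/OUTS; if it doesn't, we fall back on having IEM interpret the instruction. */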
8773 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8774 if (fInsOutsInfo)
8775 {
8776 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
8777 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8778 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8779 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8780 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8781 if (fIOWrite)
8782 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8783 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8784 else
8785 {
8786 /*
8787 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8788 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8789 * See Intel Instruction spec. for "INS".
8790 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8791 */
8792 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8793 }
8794 }
8795 else
8796 rcStrict = IEMExecOne(pVCpu);
8797
8798 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8799 fUpdateRipAlready = true;
8800 }
8801 else
8802 {
8803 /*
8804 * IN/OUT - I/O instruction.
8805 */
8806 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8807 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8808 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8809 if (fIOWrite)
8810 {
8811 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8812 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8813#ifndef IN_NEM_DARWIN
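                /* If the write has to be completed in ring-3, record it as a pending I/O port
                   write; this is skipped while single-stepping (TF), presumably so the trap
                   flag handling isn't bypassed. */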
8814 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8815 && !pCtx->eflags.Bits.u1TF)
8816 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8817#endif
8818 }
8819 else
8820 {
8821 uint32_t u32Result = 0;
8822 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8823 if (IOM_SUCCESS(rcStrict))
8824 {
8825 /* Save result of I/O IN instr. in AL/AX/EAX. */
8826 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8827 }
8828#ifndef IN_NEM_DARWIN
8829 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8830 && !pCtx->eflags.Bits.u1TF)
8831 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8832#endif
8833 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8834 }
8835 }
8836
8837 if (IOM_SUCCESS(rcStrict))
8838 {
8839 if (!fUpdateRipAlready)
8840 {
8841 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8842 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8843 }
8844
8845 /*
8846             * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault guru
8847             * meditation while booting a Fedora 17 64-bit guest.
8848 *
8849 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8850 */
8851 if (fIOString)
8852 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8853
8854 /*
8855 * If any I/O breakpoints are armed, we need to check if one triggered
8856 * and take appropriate action.
8857 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8858 */
8859#if 1
8860 AssertCompile(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7);
8861#else
8862 AssertCompile(!(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7));
8863 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo);
8864 AssertRCReturn(rc, rc);
8865#endif
8866
8867 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8868 * execution engines about whether hyper BPs and such are pending. */
8869 uint32_t const uDr7 = pCtx->dr[7];
8870 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8871 && X86_DR7_ANY_RW_IO(uDr7)
8872 && (pCtx->cr4 & X86_CR4_DE))
8873 || DBGFBpIsHwIoArmed(pVM)))
8874 {
8875 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8876
8877#ifndef IN_NEM_DARWIN
8878 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8879 VMMRZCallRing3Disable(pVCpu);
8880 HM_DISABLE_PREEMPT(pVCpu);
8881
8882 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8883
8884 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8885 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8886 {
8887 /* Raise #DB. */
8888 if (fIsGuestDbgActive)
8889 ASMSetDR6(pCtx->dr[6]);
8890 if (pCtx->dr[7] != uDr7)
8891 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8892
8893 vmxHCSetPendingXcptDB(pVCpu);
8894 }
8895 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8896 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8897 else if ( rcStrict2 != VINF_SUCCESS
8898 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8899 rcStrict = rcStrict2;
8900 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8901
8902 HM_RESTORE_PREEMPT();
8903 VMMRZCallRing3Enable(pVCpu);
8904#else
8905 /** @todo */
8906#endif
8907 }
8908 }
8909
8910#ifdef VBOX_STRICT
8911 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8912 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8913 Assert(!fIOWrite);
8914 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8915 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8916 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8917 Assert(fIOWrite);
8918 else
8919 {
8920# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8921 * statuses, that the VMM device and some others may return. See
8922 * IOM_SUCCESS() for guidance. */
8923 AssertMsg( RT_FAILURE(rcStrict)
8924 || rcStrict == VINF_SUCCESS
8925 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8926 || rcStrict == VINF_EM_DBG_BREAKPOINT
8927 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8928 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8929# endif
8930 }
8931#endif
8932 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8933 }
8934 else
8935 {
8936 /*
8937 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8938 */
8939 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
8940 VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8941 AssertRCReturn(rc2, rc2);
8942 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8943 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8944 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8945 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8946 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8947 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8948
8949 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8950 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8951
8952 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8953 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8954 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8955 }
8956 return rcStrict;
8957}
8958
8959
8960/**
8961 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8962 * VM-exit.
8963 */
8964HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8965{
8966 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8967
8968    /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8969 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
8970 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
8971 {
8972 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_INFO>(pVCpu, pVmxTransient);
8973 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
8974 {
8975 uint32_t uErrCode;
8976 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
8977 {
8978 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
8979 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
8980 }
8981 else
8982 uErrCode = 0;
8983
8984 RTGCUINTPTR GCPtrFaultAddress;
8985 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
8986 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
8987 else
8988 GCPtrFaultAddress = 0;
8989
8990 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8991
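            /* Queue the original event for re-injection and return to ring-3 below so the
               event delivery, and with it the task switch, can be emulated there. */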
8992 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
8993 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
8994
8995 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
8996 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
8997 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8998 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8999 }
9000 }
9001
9002 /* Fall back to the interpreter to emulate the task-switch. */
9003 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9004 return VERR_EM_INTERPRETER;
9005}
9006
9007
9008/**
9009 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
9010 */
9011HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9012{
9013 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9014
9015 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
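    /* The monitor trap flag causes a VM-exit after each guest instruction and is used here
       for single-stepping; disarm it and report the completed step to the debugger. */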
9016 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
9017 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9018 AssertRC(rc);
9019 return VINF_EM_DBG_STEPPED;
9020}
9021
9022
9023/**
9024 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
9025 */
9026HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9027{
9028 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9029 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
9030
9031 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9032 | HMVMX_READ_EXIT_INSTR_LEN
9033 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9034 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9035 | HMVMX_READ_IDT_VECTORING_INFO
9036 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9037
9038 /*
9039 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9040 */
9041 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9042 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9043 {
9044 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
9045 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
9046 {
9047 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9048 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9049 }
9050 }
9051 else
9052 {
9053 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9054 return rcStrict;
9055 }
9056
9057 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
9058 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9059 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9060 AssertRCReturn(rc, rc);
9061
9062    /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
9063 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
9064 switch (uAccessType)
9065 {
9066#ifndef IN_NEM_DARWIN
9067 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
9068 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
9069 {
9070 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
9071 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
9072 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
9073
9074 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
9075 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
9076 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
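            /* E.g. with the typical APIC base of 0xfee00000 and an access to the ICR at
               offset 0x300, GCPhys ends up as 0xfee00300. */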
9077 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
9078 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
9079
9080 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
9081 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
9082 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9083 if ( rcStrict == VINF_SUCCESS
9084 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9085 || rcStrict == VERR_PAGE_NOT_PRESENT)
9086 {
9087 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9088 | HM_CHANGED_GUEST_APIC_TPR);
9089 rcStrict = VINF_SUCCESS;
9090 }
9091 break;
9092 }
9093#else
9094 /** @todo */
9095#endif
9096
9097 default:
9098 {
9099 Log4Func(("uAccessType=%#x\n", uAccessType));
9100 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
9101 break;
9102 }
9103 }
9104
9105 if (rcStrict != VINF_SUCCESS)
9106 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
9107 return rcStrict;
9108}
9109
9110
9111/**
9112 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
9113 * VM-exit.
9114 */
9115HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9116{
9117 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9118 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9119
9120 /*
9121 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
9122 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
9123 * must emulate the MOV DRx access.
9124 */
9125 if (!pVmxTransient->fIsNestedGuest)
9126 {
9127 /* We should -not- get this VM-exit if the guest's debug registers were active. */
9128 if ( pVmxTransient->fWasGuestDebugStateActive
9129#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9130 && !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx
9131#endif
9132 )
9133 {
9134 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
9135 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
9136 }
9137
9138 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
9139 && !pVmxTransient->fWasHyperDebugStateActive)
9140 {
9141 Assert(!DBGFIsStepping(pVCpu));
9142 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
9143
9144 /* Whether we disable intercepting MOV DRx instructions and resume
9145 the current one, or emulate it and keep intercepting them is
9146 configurable. Though it usually comes down to whether there are
9147 any new DR6 & DR7 bits (RTM) we want to hide from the guest. */
9148#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9149 bool const fResumeInstruction = !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx;
9150#else
9151 bool const fResumeInstruction = true;
9152#endif
9153 if (fResumeInstruction)
9154 {
9155 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
9156 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9157 AssertRC(rc);
9158 }
9159
9160#ifndef IN_NEM_DARWIN
9161 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
9162 VMMRZCallRing3Disable(pVCpu);
9163 HM_DISABLE_PREEMPT(pVCpu);
9164
9165 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
9166 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
9167 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9168
9169 HM_RESTORE_PREEMPT();
9170 VMMRZCallRing3Enable(pVCpu);
9171#else
9172 CPUMR3NemActivateGuestDebugState(pVCpu);
9173 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9174 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
9175#endif
9176
9177 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
9178 if (fResumeInstruction)
9179 {
9180#ifdef VBOX_WITH_STATISTICS
9181 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9182 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9183 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9184 else
9185 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9186#endif
9187 return VINF_SUCCESS;
9188 }
9189 }
9190 }
9191
9192 /*
9193 * Import state. We must have DR7 loaded here as it's always consulted,
9194 * both for reading and writing. The other debug registers are never
9195 * exported as such.
9196 */
9197 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9198 int rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
9199 | CPUMCTX_EXTRN_GPRS_MASK
9200 | CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo, __FUNCTION__);
9201 AssertRCReturn(rc, rc);
9202
9203 uint8_t const iGReg = VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual);
9204 uint8_t const iDrReg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
9205 Log4Func(("cs:rip=%#04x:%08RX64 r%d %s dr%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iGReg,
9206 VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE ? "->" : "<-", iDrReg));
9207
9208 VBOXSTRICTRC rcStrict;
9209 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9210 {
9211 /*
9212 * Write DRx register.
9213 */
9214 rcStrict = IEMExecDecodedMovDRxWrite(pVCpu, pVmxTransient->cbExitInstr, iDrReg, iGReg);
9215 AssertMsg( rcStrict == VINF_SUCCESS
9216 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9217
9218 if (rcStrict == VINF_SUCCESS)
9219 {
9220 /** @todo r=bird: Not sure why we always flag DR7 as modified here, but I've
9221 * kept it for now to avoid breaking something non-obvious. */
9222 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9223 | HM_CHANGED_GUEST_DR7);
9224 /* Update the DR6 register if guest debug state is active, otherwise we'll
9225 trash it when calling CPUMR0DebugStateMaybeSaveGuestAndRestoreHost. */
9226 if (iDrReg == 6 && CPUMIsGuestDebugStateActive(pVCpu))
9227 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
9228 Log4Func(("r%d=%#RX64 => dr%d=%#RX64\n", iGReg, pVCpu->cpum.GstCtx.aGRegs[iGReg].u,
9229 iDrReg, pVCpu->cpum.GstCtx.dr[iDrReg]));
9230 }
9231 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9232 {
9233 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9234 rcStrict = VINF_SUCCESS;
9235 }
9236
9237 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9238 }
9239 else
9240 {
9241 /*
9242 * Read DRx register into a general purpose register.
9243 */
9244 rcStrict = IEMExecDecodedMovDRxRead(pVCpu, pVmxTransient->cbExitInstr, iGReg, iDrReg);
9245 AssertMsg( rcStrict == VINF_SUCCESS
9246 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9247
9248 if (rcStrict == VINF_SUCCESS)
9249 {
9250 if (iGReg == X86_GREG_xSP)
9251 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9252 | HM_CHANGED_GUEST_RSP);
9253 else
9254 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9255 }
9256 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9257 {
9258 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9259 rcStrict = VINF_SUCCESS;
9260 }
9261
9262 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9263 }
9264
9265 return rcStrict;
9266}
9267
9268
9269/**
9270 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
9271 * Conditional VM-exit.
9272 */
9273HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9274{
9275 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9276
9277#ifndef IN_NEM_DARWIN
9278 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9279
9280 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_LEN
9281 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9282 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9283 | HMVMX_READ_IDT_VECTORING_INFO
9284 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9285 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9286
9287 /*
9288 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9289 */
9290 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9291 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9292 {
9293 /*
9294 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
9295 * instruction emulation to inject the original event. Otherwise, injecting the original event
9296 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
9297 */
9298 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9299 { /* likely */ }
9300 else
9301 {
9302 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9303# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9304 /** @todo NSTVMX: Think about how this should be handled. */
9305 if (pVmxTransient->fIsNestedGuest)
9306 return VERR_VMX_IPE_3;
9307# endif
9308 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9309 }
9310 }
9311 else
9312 {
9313 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9314 return rcStrict;
9315 }
9316
9317 /*
9318 * Get sufficient state and update the exit history entry.
9319 */
9320 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9321 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9322 AssertRCReturn(rc, rc);
9323
9324 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9325 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
9326 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
9327 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
9328 if (!pExitRec)
9329 {
9330 /*
9331 * If we succeed, resume guest execution.
9332 * If we fail in interpreting the instruction because we couldn't get the guest physical address
9333 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
9334         * in the host TLB), resume execution, which would cause a guest page fault and let the guest handle this
9335 * weird case. See @bugref{6043}.
9336 */
9337 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9338/** @todo bird: We can probably just go straight to IOM here and assume that
9339 * it's MMIO, then fall back on PGM if that hunch didn't work out so
9340 * well. However, we need to address that aliasing workarounds that
9341 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
9342 *
9343 * Might also be interesting to see if we can get this done more or
9344 * less locklessly inside IOM. Need to consider the lookup table
9345 * updating and use a bit more carefully first (or do all updates via
9346 * rendezvous) */
9347 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, &pVCpu->cpum.GstCtx, GCPhys, UINT32_MAX);
9348 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
9349 if ( rcStrict == VINF_SUCCESS
9350 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9351 || rcStrict == VERR_PAGE_NOT_PRESENT)
9352 {
9353 /* Successfully handled MMIO operation. */
9354 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9355 | HM_CHANGED_GUEST_APIC_TPR);
9356 rcStrict = VINF_SUCCESS;
9357 }
9358 }
9359 else
9360 {
9361 /*
9362 * Frequent exit or something needing probing. Call EMHistoryExec.
9363 */
9364 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
9365 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
9366
9367 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9368 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9369
9370 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9371 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9372 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9373 }
9374 return rcStrict;
9375#else
9376 AssertFailed();
9377 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
9378#endif
9379}
9380
9381
9382/**
9383 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
9384 * VM-exit.
9385 */
9386HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9387{
9388 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9389#ifndef IN_NEM_DARWIN
9390 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9391
9392 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9393 | HMVMX_READ_EXIT_INSTR_LEN
9394 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9395 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9396 | HMVMX_READ_IDT_VECTORING_INFO
9397 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9398 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9399
9400 /*
9401 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9402 */
9403 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9404 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9405 {
9406 /*
9407 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
9408 * we shall resolve the nested #PF and re-inject the original event.
9409 */
9410 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9411 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
9412 }
9413 else
9414 {
9415 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9416 return rcStrict;
9417 }
9418
9419 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9420 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9421 AssertRCReturn(rc, rc);
9422
9423 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9424 uint64_t const uExitQual = pVmxTransient->uExitQual;
9425 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
9426
9427 RTGCUINT uErrorCode = 0;
9428 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
9429 uErrorCode |= X86_TRAP_PF_ID;
9430 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9431 uErrorCode |= X86_TRAP_PF_RW;
9432 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
9433 uErrorCode |= X86_TRAP_PF_P;
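    /* E.g. a guest write to a GCPhys whose EPT entry is present but read-only yields
       X86_TRAP_PF_RW | X86_TRAP_PF_P, while a write to an unmapped GCPhys yields just
       X86_TRAP_PF_RW. */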
9434
9435 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9436 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
9437
9438 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9439
9440 /*
9441 * Handle the pagefault trap for the nested shadow table.
9442 */
9443 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
9444 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, pCtx, GCPhys);
9445 TRPMResetTrap(pVCpu);
9446
9447 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
9448 if ( rcStrict == VINF_SUCCESS
9449 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9450 || rcStrict == VERR_PAGE_NOT_PRESENT)
9451 {
9452 /* Successfully synced our nested page tables. */
9453 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
9454 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
9455 return VINF_SUCCESS;
9456 }
9457 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9458 return rcStrict;
9459
9460#else /* IN_NEM_DARWIN */
9461 PVM pVM = pVCpu->CTX_SUFF(pVM);
9462 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
9463 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9464 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9465 vmxHCImportGuestRip(pVCpu);
9466 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
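    /* Only RIP and CS are needed for the logging and exit-history records below; the full
       guest state is imported later if we end up emulating the access. */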
9467
9468 /*
9469 * Ask PGM for information about the given GCPhys. We need to check if we're
9470 * out of sync first.
9471 */
9472 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE),
9473 false,
9474 false };
9475 PGMPHYSNEMPAGEINFO Info;
9476 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
9477 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
9478 if (RT_SUCCESS(rc))
9479 {
9480 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9481 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
9482 {
9483 if (State.fCanResume)
9484 {
9485 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
9486 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9487 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9488 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9489 State.fDidSomething ? "" : " no-change"));
9490 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
9491 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9492 return VINF_SUCCESS;
9493 }
9494 }
9495
9496 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
9497 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9498 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9499 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9500 State.fDidSomething ? "" : " no-change"));
9501 }
9502 else
9503 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
9504 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9505 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
9506
9507 /*
9508 * Emulate the memory access, either access handler or special memory.
9509 */
9510 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
9511 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9512 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
9513 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
9514 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9515
9516 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9517 AssertRCReturn(rc, rc);
9518
9519 VBOXSTRICTRC rcStrict;
9520 if (!pExitRec)
9521 rcStrict = IEMExecOne(pVCpu);
9522 else
9523 {
9524 /* Frequent access or probing. */
9525 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9526 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9527 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9528 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9529 }
9530
9531 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9532
9533 Log4Func(("EPT return rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9534 return rcStrict;
9535#endif /* IN_NEM_DARWIN */
9536}
9537
9538#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9539
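/*
 * Most of the VMX-instruction exit handlers below follow the same pattern: read the exit
 * qualification, instruction information and length, import the minimum guest state IEM
 * needs, decode the memory operand if any, hand the pre-decoded instruction to IEM, and
 * then flag whatever guest state IEM may have modified (converting VINF_IEM_RAISED_XCPT
 * into VINF_SUCCESS after setting HM_CHANGED_RAISED_XCPT_MASK). VMLAUNCH/VMRESUME import
 * the entire state instead, since a successful VM-entry switches the current VMCS.
 */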
9540/**
9541 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
9542 */
9543HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9544{
9545 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9546
9547 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9548 | HMVMX_READ_EXIT_INSTR_INFO
9549 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9550 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9551 | CPUMCTX_EXTRN_SREG_MASK
9552 | CPUMCTX_EXTRN_HWVIRT
9553 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9554 AssertRCReturn(rc, rc);
9555
9556 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9557
9558 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9559 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9560
9561 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
9562 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9563 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9564 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9565 {
9566 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9567 rcStrict = VINF_SUCCESS;
9568 }
9569 return rcStrict;
9570}
9571
9572
9573/**
9574 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9575 */
9576HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9577{
9578 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9579
9580 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
9581 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9582 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9583 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9584 AssertRCReturn(rc, rc);
9585
9586 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9587
9588 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9589 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9590 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9591 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9592 {
9593 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9594 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9595 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9596 }
9597 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9598 return rcStrict;
9599}
9600
9601
9602/**
9603 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9604 */
9605HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9606{
9607 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9608
9609 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9610 | HMVMX_READ_EXIT_INSTR_INFO
9611 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9612 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9613 | CPUMCTX_EXTRN_SREG_MASK
9614 | CPUMCTX_EXTRN_HWVIRT
9615 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9616 AssertRCReturn(rc, rc);
9617
9618 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9619
9620 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9621 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9622
9623 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9624 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9625 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9626 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9627 {
9628 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9629 rcStrict = VINF_SUCCESS;
9630 }
9631 return rcStrict;
9632}
9633
9634
9635/**
9636 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9637 */
9638HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9639{
9640 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9641
9642 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9643 | HMVMX_READ_EXIT_INSTR_INFO
9644 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9645 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9646 | CPUMCTX_EXTRN_SREG_MASK
9647 | CPUMCTX_EXTRN_HWVIRT
9648 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9649 AssertRCReturn(rc, rc);
9650
9651 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9652
9653 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9654 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9655
9656 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9657 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9658 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9659 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9660 {
9661 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9662 rcStrict = VINF_SUCCESS;
9663 }
9664 return rcStrict;
9665}
9666
9667
9668/**
9669 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9670 */
9671HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9672{
9673 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9674
9675 /*
9676     * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9677     * thus might not need to import the shadow VMCS state. However, it's safer to do so
9678     * just in case code elsewhere dares look at unsynced VMCS fields.
9679 */
9680 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9681 | HMVMX_READ_EXIT_INSTR_INFO
9682 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9683 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9684 | CPUMCTX_EXTRN_SREG_MASK
9685 | CPUMCTX_EXTRN_HWVIRT
9686 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9687 AssertRCReturn(rc, rc);
9688
9689 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9690
9691 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9692 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9693 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9694
9695 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9696 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9697 {
9698 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9699
9700# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
9701 /* Try for exit optimization. This is on the following instruction
9702 because it would be a waste of time to have to reinterpret the
9703       already decoded vmread instruction. */
9704 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
9705 if (pExitRec)
9706 {
9707 /* Frequent access or probing. */
9708 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9709 AssertRCReturn(rc, rc);
9710
9711 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9712 Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9713 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9714 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9715 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9716 }
9717# endif
9718 }
9719 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9720 {
9721 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9722 rcStrict = VINF_SUCCESS;
9723 }
9724 return rcStrict;
9725}
9726
9727
9728/**
9729 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9730 */
9731HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9732{
9733 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9734
9735 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
9736 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9737 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9738 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9739 AssertRCReturn(rc, rc);
9740
9741 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9742
9743 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9744 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9745 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9746 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9747 {
9748 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9749 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9750 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9751 }
9752 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9753 return rcStrict;
9754}
9755
9756
9757/**
9758 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9759 */
9760HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9761{
9762 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9763
9764 /*
9765 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, since our HM hook
9766 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and it
9767 * flags re-loading the entire shadow VMCS, we should save the entire shadow VMCS here.
9768 */
9769 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9770 | HMVMX_READ_EXIT_INSTR_INFO
9771 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9772 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9773 | CPUMCTX_EXTRN_SREG_MASK
9774 | CPUMCTX_EXTRN_HWVIRT
9775 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9776 AssertRCReturn(rc, rc);
9777
9778 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9779
9780 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9781 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9782 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9783
9784 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9785 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9786 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9787 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9788 {
9789 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9790 rcStrict = VINF_SUCCESS;
9791 }
9792 return rcStrict;
9793}
9794
9795
9796/**
9797 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9798 */
9799HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9800{
9801 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9802
9803 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9804 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
9805 | CPUMCTX_EXTRN_HWVIRT
9806 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9807 AssertRCReturn(rc, rc);
9808
9809 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9810
9811 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9812 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9813 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9814 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9815 {
9816 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9817 rcStrict = VINF_SUCCESS;
9818 }
9819 return rcStrict;
9820}
9821
9822
9823/**
9824 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9825 */
9826HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9827{
9828 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9829
9830 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9831 | HMVMX_READ_EXIT_INSTR_INFO
9832 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9833 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9834 | CPUMCTX_EXTRN_SREG_MASK
9835 | CPUMCTX_EXTRN_HWVIRT
9836 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9837 AssertRCReturn(rc, rc);
9838
9839 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9840
9841 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9842 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9843
9844 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9845 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9846 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9847 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9848 {
9849 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9850 rcStrict = VINF_SUCCESS;
9851 }
9852 return rcStrict;
9853}
9854
9855
9856/**
9857 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9858 */
9859HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9860{
9861 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9862
9863 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9864 | HMVMX_READ_EXIT_INSTR_INFO
9865 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9866 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9867 | CPUMCTX_EXTRN_SREG_MASK
9868 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9869 AssertRCReturn(rc, rc);
9870
9871 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9872
9873 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9874 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9875
9876 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9877 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9878 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9879 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9880 {
9881 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9882 rcStrict = VINF_SUCCESS;
9883 }
9884 return rcStrict;
9885}
9886
9887
9888# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9889/**
9890 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9891 */
9892HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9893{
9894 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9895
9896 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9897 | HMVMX_READ_EXIT_INSTR_INFO
9898 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9899 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9900 | CPUMCTX_EXTRN_SREG_MASK
9901 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9902 AssertRCReturn(rc, rc);
9903
9904 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9905
9906 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9907 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9908
9909 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9910 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9911 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9912 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9913 {
9914 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9915 rcStrict = VINF_SUCCESS;
9916 }
9917 return rcStrict;
9918}
9919# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9920#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9921/** @} */
9922
9923
9924#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9925/** @name Nested-guest VM-exit handlers.
9926 * @{
9927 */
9928/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9929/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9930/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9931
9932/**
9933 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9934 * Conditional VM-exit.
9935 */
9936HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9937{
9938 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9939
9940 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
9941
9942 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9943 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9944 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9945
9946 switch (uExitIntType)
9947 {
9948# ifndef IN_NEM_DARWIN
9949 /*
9950 * Physical NMIs:
9951 * We shouldn't direct host physical NMIs to the nested-guest; dispatch them to the host.
9952 */
9953 case VMX_EXIT_INT_INFO_TYPE_NMI:
9954 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9955# endif
9956
9957 /*
9958 * Hardware exceptions,
9959 * Software exceptions,
9960 * Privileged software exceptions:
9961 * Figure out if the exception must be delivered to the guest or the nested-guest.
9962 */
9963 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9964 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9965 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9966 {
9967 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9968 | HMVMX_READ_EXIT_INSTR_LEN
9969 | HMVMX_READ_IDT_VECTORING_INFO
9970 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9971
9972 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9973 if (CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo), pVmxTransient->uExitIntErrorCode))
9974 {
9975 /* Exit qualification is required for debug and page-fault exceptions. */
9976 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9977
9978 /*
9979 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9980 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9981 * length. However, if delivery of a software interrupt, software exception or privileged
9982 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
9983 */
9984 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9985 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT(pVmxTransient->uExitIntInfo,
9986 pVmxTransient->uExitIntErrorCode,
9987 pVmxTransient->uIdtVectoringInfo,
9988 pVmxTransient->uIdtVectoringErrorCode);
9989#ifdef DEBUG_ramshankar
9990 vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9991 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n",
9992 pVmxTransient->uExitIntInfo, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
9993 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9994 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n",
9995 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
9996#endif
9997 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
9998 }
9999
10000 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
10001 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10002 return vmxHCExitXcpt(pVCpu, pVmxTransient);
10003 }
10004
10005 /*
10006 * Software interrupts:
10007 * VM-exits cannot be caused by software interrupts.
10008 *
10009 * External interrupts:
10010 * This should only happen when "acknowledge external interrupts on VM-exit"
10011 * control is set. However, we never set this when executing a guest or
10012 * nested-guest. For nested-guests it is emulated while injecting interrupts into
10013 * the guest.
10014 */
10015 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10016 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
10017 default:
10018 {
10019 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
10020 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
10021 }
10022 }
10023}
10024
10025
10026/**
10027 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
10028 * Unconditional VM-exit.
10029 */
10030HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10031{
10032 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10033 return IEMExecVmxVmexitTripleFault(pVCpu);
10034}
10035
10036
10037/**
10038 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
10039 */
10040HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10041{
10042 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10043
10044 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
10045 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10046 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
10047}
10048
10049
10050/**
10051 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
10052 */
10053HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10054{
10055 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10056
10057 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
10058 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10059 return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
10060}
10061
10062
10063/**
10064 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
10065 * Unconditional VM-exit.
10066 */
10067HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10068{
10069 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10070
10071 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10072 | HMVMX_READ_EXIT_INSTR_LEN
10073 | HMVMX_READ_IDT_VECTORING_INFO
10074 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10075
10076 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10077 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10078 pVmxTransient->uIdtVectoringErrorCode);
10079 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
10080}
10081
10082
10083/**
10084 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10085 */
10086HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10087{
10088 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10089
10090 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
10091 {
10092 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10093 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10094 }
10095 return vmxHCExitHlt(pVCpu, pVmxTransient);
10096}
10097
10098
10099/**
10100 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10101 */
10102HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10103{
10104 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10105
10106 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10107 {
10108 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10109 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10110 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10111 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10112 }
10113 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
10114}
10115
10116
10117/**
10118 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10119 */
10120HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10121{
10122 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10123
10124 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
10125 {
10126 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10127 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10128 }
10129 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
10130}
10131
10132
10133/**
10134 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
10135 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
10136 */
10137HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10138{
10139 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10140
10141 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
10142 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
10143
10144 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10145
10146 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
10147 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10148 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10149
10150 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
10151 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
10152 u64VmcsField &= UINT64_C(0xffffffff);
10153
10154 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
10155 {
10156 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10157 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10158 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10159 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10160 }
10161
10162 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
10163 return vmxHCExitVmread(pVCpu, pVmxTransient);
10164 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
10165}
10166
10167
10168/**
10169 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10170 */
10171HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10172{
10173 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10174
10175 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10176 {
10177 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10178 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10179 }
10180
10181 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
10182}
10183
10184
10185/**
10186 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
10187 * Conditional VM-exit.
10188 */
10189HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10190{
10191 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10192
10193 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10194 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10195
10196 VBOXSTRICTRC rcStrict;
10197 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
10198 switch (uAccessType)
10199 {
10200 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
10201 {
10202 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10203 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10204 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10205 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10206
10207 bool fIntercept;
10208 switch (iCrReg)
10209 {
10210 case 0:
10211 case 4:
10212 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
10213 break;
10214
10215 case 3:
10216 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
10217 break;
10218
10219 case 8:
10220 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
10221 break;
10222
10223 default:
10224 fIntercept = false;
10225 break;
10226 }
10227 if (fIntercept)
10228 {
10229 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10230 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10231 }
10232 else
10233 {
10234 int const rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10235 AssertRCReturn(rc, rc);
10236 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10237 }
10238 break;
10239 }
10240
10241 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
10242 {
10243 /*
10244 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
10245 * CR2 reads do not cause a VM-exit.
10246 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
10247 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
10248 */
10249 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10250 if ( iCrReg == 3
10251 || iCrReg == 8)
10252 {
10253 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
10254 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
10255 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
10256 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
10257 {
10258 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10259 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10260 }
10261 else
10262 {
10263 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10264 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10265 }
10266 }
10267 else
10268 {
10269 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
10270 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
10271 }
10272 break;
10273 }
10274
10275 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
10276 {
10277 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
10278 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
10279 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
10280 if ( (uGstHostMask & X86_CR0_TS)
10281 && (uReadShadow & X86_CR0_TS))
10282 {
10283 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10284 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10285 }
10286 else
10287 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
10288 break;
10289 }
10290
10291 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10292 {
10293 RTGCPTR GCPtrEffDst;
10294 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
10295 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
10296 if (fMemOperand)
10297 {
10298 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10299 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
10300 }
10301 else
10302 GCPtrEffDst = NIL_RTGCPTR;
10303
10304 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
10305 {
10306 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10307 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
10308 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10309 }
10310 else
10311 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
10312 break;
10313 }
10314
10315 default:
10316 {
10317 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
10318 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
10319 }
10320 }
10321
10322 if (rcStrict == VINF_IEM_RAISED_XCPT)
10323 {
10324 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10325 rcStrict = VINF_SUCCESS;
10326 }
10327 return rcStrict;
10328}
10329
10330
10331/**
10332 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
10333 * Conditional VM-exit.
10334 */
10335HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10336{
10337 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10338
10339 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
10340 {
10341 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10342 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10343 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10344 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10345 }
10346 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
10347}
10348
10349
10350/**
10351 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
10352 * Conditional VM-exit.
10353 */
10354HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10355{
10356 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10357
10358 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10359
10360 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
10361 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
10362 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
10363
10364 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
10365 uint8_t const cbAccess = s_aIOSizes[uIOSize];
10366 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
10367 {
10368 /*
10369 * IN/OUT instruction:
10370 * - Provides VM-exit instruction length.
10371 *
10372 * INS/OUTS instruction:
10373 * - Provides VM-exit instruction length.
10374 * - Provides Guest-linear address.
10375 * - Optionally provides VM-exit instruction info (depends on CPU feature).
10376 */
10377 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10378 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10379
10380 /* Make sure we don't use stale/uninitialized VMX-transient info below. */
10381 pVmxTransient->ExitInstrInfo.u = 0;
10382 pVmxTransient->uGuestLinearAddr = 0;
10383
10384 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
10385 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
10386 if (fIOString)
10387 {
10388 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10389 if (fVmxInsOutsInfo)
10390 {
10391 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
10392 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10393 }
10394 }
10395
10396 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
10397 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10398 }
10399 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
10400}
10401
10402
10403/**
10404 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10405 */
10406HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10407{
10408 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10409
10410 uint32_t fMsrpm;
10411 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10412 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10413 else
10414 fMsrpm = VMXMSRPM_EXIT_RD;
10415
10416 if (fMsrpm & VMXMSRPM_EXIT_RD)
10417 {
10418 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10419 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10420 }
10421 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
10422}
10423
10424
10425/**
10426 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10427 */
10428HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10429{
10430 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10431
10432 uint32_t fMsrpm;
10433 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10434 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10435 else
10436 fMsrpm = VMXMSRPM_EXIT_WR;
10437
10438 if (fMsrpm & VMXMSRPM_EXIT_WR)
10439 {
10440 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10441 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10442 }
10443 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
10444}
10445
10446
10447/**
10448 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10449 */
10450HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10451{
10452 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10453
10454 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
10455 {
10456 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10457 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10458 }
10459 return vmxHCExitMwait(pVCpu, pVmxTransient);
10460}
10461
10462
10463/**
10464 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
10465 * VM-exit.
10466 */
10467HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10468{
10469 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10470
10471 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
10472 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10473 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10474 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10475}
10476
10477
10478/**
10479 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10480 */
10481HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10482{
10483 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10484
10485 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
10486 {
10487 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10488 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10489 }
10490 return vmxHCExitMonitor(pVCpu, pVmxTransient);
10491}
10492
10493
10494/**
10495 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10496 */
10497HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10498{
10499 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10500
10501 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10502 * PAUSE when executing a nested-guest? If it does not, we would not need
10503 * to check for the intercepts here. Just call VM-exit... */
10504
10505 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10506 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10507 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10508 {
10509 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10510 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10511 }
10512 return vmxHCExitPause(pVCpu, pVmxTransient);
10513}
10514
10515
10516/**
10517 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10518 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10519 */
10520HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10521{
10522 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10523
10524 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10525 {
10526 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10527 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10528 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10529 }
10530 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10531}
10532
10533
10534/**
10535 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10536 * VM-exit.
10537 */
10538HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10539{
10540 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10541
10542 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10543 | HMVMX_READ_EXIT_INSTR_LEN
10544 | HMVMX_READ_IDT_VECTORING_INFO
10545 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10546
10547 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10548
10549 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10550 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10551
10552 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10553 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10554 pVmxTransient->uIdtVectoringErrorCode);
10555 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10556}
10557
10558
10559/**
10560 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10561 * Conditional VM-exit.
10562 */
10563HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10564{
10565 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10566
10567 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10568 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10569 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10570}
10571
10572
10573/**
10574 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10575 * Conditional VM-exit.
10576 */
10577HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10578{
10579 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10580
10581 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10582 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10583 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10584}
10585
10586
10587/**
10588 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10589 */
10590HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10591{
10592 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10593
10594 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10595 {
10596 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10597 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10598 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10599 }
10600 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10601}
10602
10603
10604/**
10605 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10606 */
10607HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10608{
10609 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10610
10611 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10612 {
10613 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10614 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10615 }
10616 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10617}
10618
10619
10620/**
10621 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10622 */
10623HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10624{
10625 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10626
10627 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10628 {
10629 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10630 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10631 | HMVMX_READ_EXIT_INSTR_INFO
10632 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10633 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10634 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10635 }
10636 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10637}
10638
10639
10640/**
10641 * Nested-guest VM-exit handler for invalid-guest state
10642 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10643 */
10644HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10645{
10646 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10647
10648 /*
10649 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10650 * So if it does happen, it indicates a possible bug in the hardware-assisted VMX code.
10651 * Handle it as if the outer guest were in an invalid guest state.
10652 *
10653 * When the fast path is implemented, this should be changed to cause the corresponding
10654 * nested-guest VM-exit.
10655 */
10656 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10657}
10658
10659
10660/**
10661 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10662 * and only provide the instruction length.
10663 *
10664 * Unconditional VM-exit.
10665 */
10666HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10667{
10668 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10669
10670#ifdef VBOX_STRICT
10671 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10672 switch (pVmxTransient->uExitReason)
10673 {
10674 case VMX_EXIT_ENCLS:
10675 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10676 break;
10677
10678 case VMX_EXIT_VMFUNC:
10679 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10680 break;
10681 }
10682#endif
10683
10684 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10685 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10686}
10687
10688
10689/**
10690 * Nested-guest VM-exit handler for instructions that provide instruction length as
10691 * well as more information.
10692 *
10693 * Unconditional VM-exit.
10694 */
10695HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10696{
10697 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10698
10699# ifdef VBOX_STRICT
10700 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10701 switch (pVmxTransient->uExitReason)
10702 {
10703 case VMX_EXIT_GDTR_IDTR_ACCESS:
10704 case VMX_EXIT_LDTR_TR_ACCESS:
10705 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10706 break;
10707
10708 case VMX_EXIT_RDRAND:
10709 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10710 break;
10711
10712 case VMX_EXIT_RDSEED:
10713 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10714 break;
10715
10716 case VMX_EXIT_XSAVES:
10717 case VMX_EXIT_XRSTORS:
10718 /** @todo NSTVMX: Verify XSS-bitmap. */
10719 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10720 break;
10721
10722 case VMX_EXIT_UMWAIT:
10723 case VMX_EXIT_TPAUSE:
10724 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10725 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10726 break;
10727
10728 case VMX_EXIT_LOADIWKEY:
10729 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10730 break;
10731 }
10732# endif
10733
10734 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10735 | HMVMX_READ_EXIT_INSTR_LEN
10736 | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10737 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10738 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10739}
10740
10741# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10742
10743/**
10744 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10745 * Conditional VM-exit.
10746 */
10747HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10748{
10749 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10750 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10751
10752 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10753 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10754 {
10755 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10756 | HMVMX_READ_EXIT_INSTR_LEN
10757 | HMVMX_READ_EXIT_INTERRUPTION_INFO
10758 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10759 | HMVMX_READ_IDT_VECTORING_INFO
10760 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
10761 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10762 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10763 AssertRCReturn(rc, rc);
10764
10765 /*
10766 * If this VM-exit is ours to handle, we're responsible for re-injecting any event whose
10767 * delivery might have triggered it. If we forward the problem to the inner VMM instead,
10768 * dealing with it becomes the inner VMM's problem and we clear the recovered event.
10769 */
10770 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10771 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10772 { /*likely*/ }
10773 else
10774 {
10775 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
10776 return rcStrict;
10777 }
10778 uint32_t const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* paranoia. should not inject events below. */
10779
10780 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10781 uint64_t const uExitQual = pVmxTransient->uExitQual;
10782
10783 RTGCPTR GCPtrNestedFault;
10784 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10785 if (fIsLinearAddrValid)
10786 {
10787 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10788 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
10789 }
10790 else
10791 GCPtrNestedFault = 0;
10792
10793 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10794 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10795 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10796 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10797 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10798
10799 PGMPTWALK Walk;
10800 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10801 rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, pCtx, GCPhysNestedFault,
10802 fIsLinearAddrValid, GCPtrNestedFault, &Walk);
10803 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
10804 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
10805 if (RT_SUCCESS(rcStrict))
10806 return rcStrict;
10807
10808 if (fClearEventOnForward)
10809 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
10810
10811 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10812 pVmxTransient->uIdtVectoringErrorCode);
10813 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10814 {
10815 VMXVEXITINFO const ExitInfo
10816 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
10817 pVmxTransient->uExitQual,
10818 pVmxTransient->cbExitInstr,
10819 pVmxTransient->uGuestLinearAddr,
10820 pVmxTransient->uGuestPhysicalAddr);
10821 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10822 }
10823
10824 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10825 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10826 }
10827
10828 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10829}
10830
10831
10832/**
10833 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10834 * Conditional VM-exit.
10835 */
10836HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10837{
10838 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10839 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10840
10841 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10842 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10843 {
10844 vmxHCReadToTransient<HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10845 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10846 AssertRCReturn(rc, rc);
10847
10848 PGMPTWALK Walk;
10849 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10850 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10851 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, pCtx,
10852 GCPhysNestedFault, false /* fIsLinearAddrValid */,
10853 0 /* GCPtrNestedFault */, &Walk);
10854 if (RT_SUCCESS(rcStrict))
10855 {
10856 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
10857 return rcStrict;
10858 }
10859
10860 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
10861 vmxHCReadToTransient< HMVMX_READ_IDT_VECTORING_INFO
10862 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10863
10864 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10865 pVmxTransient->uIdtVectoringErrorCode);
10866 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10867 }
10868
10869 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
10870}
10871
10872# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10873
10874/** @} */
10875#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10876
10877
10878/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10879 * probes.
10880 *
10881 * The following few functions and associated structure contain the bloat
10882 * necessary for providing detailed debug events and dtrace probes as well as
10883 * reliable host-side single stepping. This works on the principle of
10884 * "subclassing" the normal execution loop and workers. We replace the loop
10885 * method completely and override selected helpers to add necessary adjustments
10886 * to their core operation.
10887 *
10888 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10889 * any performance for debug and analysis features.
10890 *
10891 * @{
10892 */
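
/*
 * Illustrative sketch (not part of the build): roughly how the debug-state helpers
 * defined below are meant to be strung together by the debug run loop. Only the
 * vmxHC*DebugState* helpers named here exist in this file; the run/exit-dispatch
 * step is a placeholder.
 *
 *     VMXRUNDBGSTATE DbgState;
 *     vmxHCRunDebugStateInit(pVCpu, pVmxTransient, &DbgState);
 *     vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState);
 *     for (;;)
 *     {
 *         vmxHCPreRunGuestDebugStateApply(pVCpu, pVmxTransient, &DbgState);   // commit tweaks to the VMCS
 *         // ... run the guest and dispatch the VM-exit (placeholder) ...
 *         if (rcStrict != VINF_SUCCESS)                                       // or a single-step / DBGF event was raised
 *             break;
 *         vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState);  // DBGF/DTrace settings may have changed
 *     }
 *     rcStrict = vmxHCRunDebugStateRevert(pVCpu, pVmxTransient, &DbgState, rcStrict);
 */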
10893
10894/**
10895 * Transient per-VCPU debug state of the VMCS and related info that we save and
10896 * restore across the debug run loop.
10897 */
10898typedef struct VMXRUNDBGSTATE
10899{
10900 /** The RIP we started executing at. This is for detecting that we stepped. */
10901 uint64_t uRipStart;
10902 /** The CS we started executing with. */
10903 uint16_t uCsStart;
10904
10905 /** Whether we've actually modified the 1st execution control field. */
10906 bool fModifiedProcCtls : 1;
10907 /** Whether we've actually modified the 2nd execution control field. */
10908 bool fModifiedProcCtls2 : 1;
10909 /** Whether we've actually modified the exception bitmap. */
10910 bool fModifiedXcptBitmap : 1;
10911
10912 /** We desire the CR0 mask to be cleared. */
10913 bool fClearCr0Mask : 1;
10914 /** We desire the CR4 mask to be cleared. */
10915 bool fClearCr4Mask : 1;
10916 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10917 uint32_t fCpe1Extra;
10918 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10919 uint32_t fCpe1Unwanted;
10920 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10921 uint32_t fCpe2Extra;
10922 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10923 uint32_t bmXcptExtra;
10924 /** The sequence number of the Dtrace provider settings the state was
10925 * configured against. */
10926 uint32_t uDtraceSettingsSeqNo;
10927 /** VM-exits to check (one bit per VM-exit). */
10928 uint32_t bmExitsToCheck[3];
10929
10930 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10931 uint32_t fProcCtlsInitial;
10932 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10933 uint32_t fProcCtls2Initial;
10934 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10935 uint32_t bmXcptInitial;
10936} VMXRUNDBGSTATE;
10937AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
10938typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
10939
10940
10941/**
10942 * Initializes the VMXRUNDBGSTATE structure.
10943 *
10944 * @param pVCpu The cross context virtual CPU structure of the
10945 * calling EMT.
10946 * @param pVmxTransient The VMX-transient structure.
10947 * @param pDbgState The debug state to initialize.
10948 */
10949static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10950{
10951 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
10952 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
10953
10954 pDbgState->fModifiedProcCtls = false;
10955 pDbgState->fModifiedProcCtls2 = false;
10956 pDbgState->fModifiedXcptBitmap = false;
10957 pDbgState->fClearCr0Mask = false;
10958 pDbgState->fClearCr4Mask = false;
10959 pDbgState->fCpe1Extra = 0;
10960 pDbgState->fCpe1Unwanted = 0;
10961 pDbgState->fCpe2Extra = 0;
10962 pDbgState->bmXcptExtra = 0;
10963 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
10964 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
10965 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
10966}
10967
10968
10969/**
10970 * Updates the VMCS fields with changes requested by @a pDbgState.
10971 *
10972 * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
10973 * immediately before executing guest code, i.e. when interrupts are disabled.
10974 * We don't check status codes here as we cannot easily assert or return in the
10975 * latter case.
10976 *
10977 * @param pVCpu The cross context virtual CPU structure.
10978 * @param pVmxTransient The VMX-transient structure.
10979 * @param pDbgState The debug state.
10980 */
10981static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10982{
10983 /*
10984 * Ensure desired flags in VMCS control fields are set.
10985 * (Ignoring write failure here, as we're committed and it's just debug extras.)
10986 *
10987 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
10988 * there should be no stale data in pCtx at this point.
10989 */
10990 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10991 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
10992 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
10993 {
10994 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
10995 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
10996 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
10997 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
10998 pDbgState->fModifiedProcCtls = true;
10999 }
11000
11001 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
11002 {
11003 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
11004 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
11005 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
11006 pDbgState->fModifiedProcCtls2 = true;
11007 }
11008
11009 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
11010 {
11011 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
11012 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
11013 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
11014 pDbgState->fModifiedXcptBitmap = true;
11015 }
11016
11017 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
11018 {
11019 pVmcsInfo->u64Cr0Mask = 0;
11020 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
11021 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
11022 }
11023
11024 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
11025 {
11026 pVmcsInfo->u64Cr4Mask = 0;
11027 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
11028 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
11029 }
11030
11031 NOREF(pVCpu);
11032}
11033
11034
11035/**
11036 * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
11037 * re-entry next time around.
11038 *
11039 * @returns Strict VBox status code (i.e. informational status codes too).
11040 * @param pVCpu The cross context virtual CPU structure.
11041 * @param pVmxTransient The VMX-transient structure.
11042 * @param pDbgState The debug state.
11043 * @param rcStrict The return code from executing the guest using single
11044 * stepping.
11045 */
11046static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
11047 VBOXSTRICTRC rcStrict)
11048{
11049 /*
11050 * Restore VM-exit control settings as we may not reenter this function the
11051 * next time around.
11052 */
11053 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11054
11055 /* We reload the initial value and trigger what recalculations we can the
11056 next time around. From the looks of things, that's all that's required atm. */
11057 if (pDbgState->fModifiedProcCtls)
11058 {
11059 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
11060 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
11061 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
11062 AssertRC(rc2);
11063 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
11064 }
11065
11066 /* We're currently the only ones messing with this one, so just restore the
11067 cached value and reload the field. */
11068 if ( pDbgState->fModifiedProcCtls2
11069 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
11070 {
11071 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
11072 AssertRC(rc2);
11073 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
11074 }
11075
11076 /* If we've modified the exception bitmap, we restore it and trigger
11077 reloading and partial recalculation the next time around. */
11078 if (pDbgState->fModifiedXcptBitmap)
11079 {
11080 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pDbgState->bmXcptInitial);
11081 AssertRC(rc2);
11082 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
11083 }
11084
11085 return rcStrict;
11086}
11087
11088
11089/**
11090 * Configures VM-exit controls for current DBGF and DTrace settings.
11091 *
11092 * This updates @a pDbgState and the VMCS execution control fields to reflect
11093 * the necessary VM-exits demanded by DBGF and DTrace.
11094 *
11095 * @param pVCpu The cross context virtual CPU structure.
11096 * @param pVmxTransient The VMX-transient structure. May update
11097 * fUpdatedTscOffsettingAndPreemptTimer.
11098 * @param pDbgState The debug state.
11099 */
11100static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11101{
11102#ifndef IN_NEM_DARWIN
11103 /*
11104 * Take down the dtrace serial number so we can spot changes.
11105 */
11106 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
11107 ASMCompilerBarrier();
11108#endif
11109
11110 /*
11111 * We'll rebuild most of the middle block of data members (holding the
11112 * current settings) as we go along here, so start by clearing it all.
11113 */
11114 pDbgState->bmXcptExtra = 0;
11115 pDbgState->fCpe1Extra = 0;
11116 pDbgState->fCpe1Unwanted = 0;
11117 pDbgState->fCpe2Extra = 0;
11118 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
11119 pDbgState->bmExitsToCheck[i] = 0;
11120
11121 /*
11122 * Software interrupts (INT XXh) - no idea how to trigger these...
11123 */
11124 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11125 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
11126 || VBOXVMM_INT_SOFTWARE_ENABLED())
11127 {
11128 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11129 }
11130
11131 /*
11132 * INT3 breakpoints - triggered by #BP exceptions.
11133 */
11134 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
11135 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11136
11137 /*
11138 * Exception bitmap and XCPT events+probes.
11139 */
11140 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
11141 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
11142 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
11143
11144 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
11145 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
11146 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11147 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
11148 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
11149 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
11150 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
11151 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
11152 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
11153 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
11154 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
11155 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
11156 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
11157 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
11158 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
11159 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
11160 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
11161 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
11162
11163 if (pDbgState->bmXcptExtra)
11164 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11165
11166 /*
11167 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
11168 *
11169 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
11170 * So, when adding/changing/removing please don't forget to update it.
11171 *
11172 * Some of the macros are picking up local variables to save horizontal space
11173 * (being able to see it in a table is the lesser evil here).
11174 */
11175#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
11176 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
11177 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
11178#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
11179 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11180 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11181 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11182 } else do { } while (0)
11183#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
11184 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11185 { \
11186 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
11187 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11188 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11189 } else do { } while (0)
11190#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
11191 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11192 { \
11193 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
11194 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11195 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11196 } else do { } while (0)
11197#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
11198 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11199 { \
11200 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
11201 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11202 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11203 } else do { } while (0)
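
/*
 * Example (for reference only): with the macros above,
 *     SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT);
 * expands (roughly, omitting the AssertCompile) to:
 *     if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INSTR_HALT) || VBOXVMM_INSTR_HALT_ENABLED())
 *     {
 *         pDbgState->fCpe1Extra |= VMX_PROC_CTLS_HLT_EXIT;
 *         ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_HLT);
 *     }
 * i.e. it both forces the HLT-exiting execution control and marks the HLT VM-exit
 * as one that must be checked for events/probes after each run.
 */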
11204
11205 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
11206 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
11207 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
11208 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
11209 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
11210
11211 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
11212 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
11213 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
11214 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
11215 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
11216 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
11217 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
11218 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
11219 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
11220 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
11221 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
11222 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
11223 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
11224 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
11225 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
11226 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
11227 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
11228 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
11229 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
11230 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
11231 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
11232 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
11233 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
11234 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
11235 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
11236 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
11237 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
11238 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
11239 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
11240 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
11241 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
11242 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
11243 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
11244 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
11245 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
11246 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
11247
11248 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
11249 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11250 {
11251 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo,
11252 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
11253 AssertRC(rc);
11254
11255#if 0 /** @todo fix me */
11256 pDbgState->fClearCr0Mask = true;
11257 pDbgState->fClearCr4Mask = true;
11258#endif
11259 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
11260 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
11261 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11262 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
11263 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
11264 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
11265 require clearing here and in the loop if we start using it. */
11266 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
11267 }
11268 else
11269 {
11270 if (pDbgState->fClearCr0Mask)
11271 {
11272 pDbgState->fClearCr0Mask = false;
11273 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
11274 }
11275 if (pDbgState->fClearCr4Mask)
11276 {
11277 pDbgState->fClearCr4Mask = false;
11278 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
11279 }
11280 }
11281 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
11282 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
11283
11284 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
11285 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
11286 {
11287 /** @todo later, need to fix handler as it assumes this won't usually happen. */
11288 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
11289 }
11290 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
11291 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
11292
11293 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
11294 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
11295 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
11296 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
11297 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
11298 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
11299 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
11300 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
11301#if 0 /** @todo too slow, fix handler. */
11302 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
11303#endif
11304 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
11305
11306 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
11307 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
11308 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
11309 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
11310 {
11311 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11312 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
11313 }
11314 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11315 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11316 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11317 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11318
11319 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
11320 || IS_EITHER_ENABLED(pVM, INSTR_STR)
11321 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
11322 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
11323 {
11324 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11325 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
11326 }
11327 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
11328 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
11329 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
11330 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
11331
11332 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
11333 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
11334 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
11335 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
11336 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
11337 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
11338 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
11339 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
11340 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
11341 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
11342 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
11343 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
11344 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
11345 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
11346 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
11347 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
11348 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
11349 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
11350 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
11351 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
11352 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
11353 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
11354
11355#undef IS_EITHER_ENABLED
11356#undef SET_ONLY_XBM_IF_EITHER_EN
11357#undef SET_CPE1_XBM_IF_EITHER_EN
11358#undef SET_CPEU_XBM_IF_EITHER_EN
11359#undef SET_CPE2_XBM_IF_EITHER_EN
11360
11361 /*
11362 * Sanitize the control stuff.
11363 */
11364 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
11365 if (pDbgState->fCpe2Extra)
11366 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
11367 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
11368 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
11369#ifndef IN_NEM_DARWIN
11370 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11371 {
11372 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
11373 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11374 }
11375#else
11376 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11377 {
11378 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
11379 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11380 }
11381#endif
11382
11383 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
11384 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
11385 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
11386 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
11387}
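
/*
 * Editor's sketch (not part of the original file): the extra/unwanted execution
 * controls sanitized above are meant to be folded into the VMCS before the guest
 * is re-entered. Assuming the usual VMXVMCSINFO fields and the VMX_VMCS_WRITE_32
 * wrapper this template requires, that boils down to roughly:
 *
 *   uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
 *   uProcCtls |=  pDbgState->fCpe1Extra;      // turn on the exits we want to see.
 *   uProcCtls &= ~pDbgState->fCpe1Unwanted;   // drop features that would hide them.
 *   int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
 *   AssertRC(rc);
 *
 * Treat this purely as orientation; the real apply code presumably also deals
 * with the secondary controls (fCpe2Extra) and caches what it wrote.
 */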
11388
11389
11390/**
11391 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
11392 * appropriate.
11393 *
11394 * The caller has already checked the VM-exit against the
11395 * VMXRUNDBGSTATE::bmExitsToCheck bitmap and has checked for NMIs, so we
11396 * don't have to do either here.
11397 *
11398 * @returns Strict VBox status code (i.e. informational status codes too).
11399 * @param pVCpu The cross context virtual CPU structure.
11400 * @param pVmxTransient The VMX-transient structure.
11401 * @param uExitReason The VM-exit reason.
11402 *
11403 * @remarks The name of this function is displayed by dtrace, so keep it short
11404 * and to the point. No longer than 33 chars, please.
11405 */
11406static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11407{
11408 /*
11409 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11410 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11411 *
11412 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
11413 * does. Additions/changes/removals must be made in both places, in the same order.
11414 *
11415 * Added/removed events must also be reflected in the next section
11416 * where we dispatch dtrace events.
11417 */
11418 bool fDtrace1 = false;
11419 bool fDtrace2 = false;
11420 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11421 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11422 uint32_t uEventArg = 0;
11423#define SET_EXIT(a_EventSubName) \
11424 do { \
11425 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11426 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11427 } while (0)
11428#define SET_BOTH(a_EventSubName) \
11429 do { \
11430 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11431 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11432 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11433 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11434 } while (0)
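    /*
     * Editor's note: for illustration, "case VMX_EXIT_CPUID: SET_BOTH(CPUID);" in
     * the switch below expands to roughly the following, which the dtrace and
     * DBGF dispatch code further down then acts on:
     *
     *   enmEvent1 = DBGFEVENT_INSTR_CPUID;
     *   enmEvent2 = DBGFEVENT_EXIT_CPUID;
     *   fDtrace1  = VBOXVMM_INSTR_CPUID_ENABLED();
     *   fDtrace2  = VBOXVMM_EXIT_CPUID_ENABLED();
     */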
11435 switch (uExitReason)
11436 {
11437 case VMX_EXIT_MTF:
11438 return vmxHCExitMtf(pVCpu, pVmxTransient);
11439
11440 case VMX_EXIT_XCPT_OR_NMI:
11441 {
11442 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11443 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11444 {
11445 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11446 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11447 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11448 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11449 {
11450 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11451 {
11452 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE>(pVCpu, pVmxTransient);
11453 uEventArg = pVmxTransient->uExitIntErrorCode;
11454 }
11455 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11456 switch (enmEvent1)
11457 {
11458 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11459 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11460 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11461 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11462 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11463 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11464 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11465 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11466 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11467 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11468 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11469 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11470 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11471 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11472 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11473 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11474 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11475 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11476 default: break;
11477 }
11478 }
11479 else
11480 AssertFailed();
11481 break;
11482
11483 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11484 uEventArg = idxVector;
11485 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11486 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11487 break;
11488 }
11489 break;
11490 }
11491
11492 case VMX_EXIT_TRIPLE_FAULT:
11493 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11494 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11495 break;
11496 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11497 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11498 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11499 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11500 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11501
11502 /* Instruction specific VM-exits: */
11503 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11504 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11505 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11506 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11507 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11508 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11509 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11510 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11511 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11512 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11513 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11514 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11515 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11516 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11517 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11518 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11519 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11520 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11521 case VMX_EXIT_MOV_CRX:
11522 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11523 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11524 SET_BOTH(CRX_READ);
11525 else
11526 SET_BOTH(CRX_WRITE);
11527 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11528 break;
11529 case VMX_EXIT_MOV_DRX:
11530 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11531 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11532 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11533 SET_BOTH(DRX_READ);
11534 else
11535 SET_BOTH(DRX_WRITE);
11536 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11537 break;
11538 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11539 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11540 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11541 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11542 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11543 case VMX_EXIT_GDTR_IDTR_ACCESS:
11544 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11545 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11546 {
11547 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11548 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11549 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11550 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11551 }
11552 break;
11553
11554 case VMX_EXIT_LDTR_TR_ACCESS:
11555 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11556 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11557 {
11558 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11559 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11560 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11561 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11562 }
11563 break;
11564
11565 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11566 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11567 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11568 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11569 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11570 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11571 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11572 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11573 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11574 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11575 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11576
11577 /* Events that aren't relevant at this point. */
11578 case VMX_EXIT_EXT_INT:
11579 case VMX_EXIT_INT_WINDOW:
11580 case VMX_EXIT_NMI_WINDOW:
11581 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11582 case VMX_EXIT_PREEMPT_TIMER:
11583 case VMX_EXIT_IO_INSTR:
11584 break;
11585
11586 /* Errors and unexpected events. */
11587 case VMX_EXIT_INIT_SIGNAL:
11588 case VMX_EXIT_SIPI:
11589 case VMX_EXIT_IO_SMI:
11590 case VMX_EXIT_SMI:
11591 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11592 case VMX_EXIT_ERR_MSR_LOAD:
11593 case VMX_EXIT_ERR_MACHINE_CHECK:
11594 case VMX_EXIT_PML_FULL:
11595 case VMX_EXIT_VIRTUALIZED_EOI:
11596 break;
11597
11598 default:
11599 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11600 break;
11601 }
11602#undef SET_BOTH
11603#undef SET_EXIT
11604
11605 /*
11606 * Dtrace tracepoints go first. We do them all here at once so we don't
11607 * have to duplicate the guest-state saving and related code a few dozen times.
11608 * The downside is that we've got to repeat the switch, though this time
11609 * we use enmEvent since the probes are a subset of what DBGF does.
11610 */
11611 if (fDtrace1 || fDtrace2)
11612 {
11613 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11614 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11615 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11616 switch (enmEvent1)
11617 {
11618 /** @todo consider which extra parameters would be helpful for each probe. */
11619 case DBGFEVENT_END: break;
11620 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11621 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11622 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11623 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11624 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11625 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11626 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11627 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11628 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11629 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11630 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11631 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11632 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11633 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11634 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11635 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11636 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11637 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11638 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11639 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11640 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11641 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11642 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11643 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11644 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11645 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11646 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11647 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11648 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11649 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11650 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11651 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11652 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11653 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11654 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11655 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11656 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11657 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11658 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11659 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11660 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11661 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11662 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11663 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11664 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11665 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11666 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11667 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11668 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11669 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11670 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11671 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11672 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11673 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11674 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11675 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11676 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11677 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11678 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11679 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11680 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11681 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11682 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11683 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11684 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11685 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11686 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11687 }
11688 switch (enmEvent2)
11689 {
11690 /** @todo consider which extra parameters would be helpful for each probe. */
11691 case DBGFEVENT_END: break;
11692 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11693 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11694 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11695 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11696 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11697 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11698 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11699 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11700 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11701 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11702 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11703 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11704 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11705 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11706 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11707 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11708 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11709 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11710 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11711 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11712 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11713 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11714 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11715 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11716 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11717 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11718 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11719 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11720 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11721 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11722 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11723 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11724 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11725 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11726 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11727 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11728 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11729 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11730 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11731 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11732 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11733 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11734 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11735 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11736 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11737 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11738 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11739 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11740 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11741 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11742 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11743 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11744 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11745 }
11746 }
11747
11748 /*
11749 * Fire off the DBGF event, if enabled (our check here is just a quick one,
11750 * the DBGF call will do a full check).
11751 *
11752 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11753 * Note! If we have two events, we prioritize the first, i.e. the instruction
11754 * one, in order to avoid event nesting.
11755 */
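    /*
     * Editor's note: as a concrete example of the prioritization implemented just
     * below, take a CPUID exit with both DBGFEVENT_INSTR_CPUID and
     * DBGFEVENT_EXIT_CPUID enabled: only the first (instruction) event is handed
     * to DBGFEventGenericWithArgs(); the EXIT variant is skipped so the debugger
     * never sees two nested events for the same VM-exit.
     */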
11756 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11757 if ( enmEvent1 != DBGFEVENT_END
11758 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11759 {
11760 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11761 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11762 if (rcStrict != VINF_SUCCESS)
11763 return rcStrict;
11764 }
11765 else if ( enmEvent2 != DBGFEVENT_END
11766 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11767 {
11768 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11769 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11770 if (rcStrict != VINF_SUCCESS)
11771 return rcStrict;
11772 }
11773
11774 return VINF_SUCCESS;
11775}
11776
11777
11778/**
11779 * Single-stepping VM-exit filtering.
11780 *
11781 * This is preprocessing the VM-exits and deciding whether we've gotten far
11782 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11783 * handling is performed.
11784 *
11785 * @returns Strict VBox status code (i.e. informational status codes too).
11786 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11787 * @param pVmxTransient The VMX-transient structure.
11788 * @param pDbgState The debug state.
11789 */
11790DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11791{
11792 /*
11793 * Expensive (saves context) generic dtrace VM-exit probe.
11794 */
11795 uint32_t const uExitReason = pVmxTransient->uExitReason;
11796 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11797 { /* more likely */ }
11798 else
11799 {
11800 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11801 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11802 AssertRC(rc);
11803 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11804 }
11805
11806#ifndef IN_NEM_DARWIN
11807 /*
11808 * Check for host NMI, just to get that out of the way.
11809 */
11810 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11811 { /* normally likely */ }
11812 else
11813 {
11814 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
11815 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11816 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11817 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11818 }
11819#endif
11820
11821 /*
11822 * Check for single stepping event if we're stepping.
11823 */
11824 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11825 {
11826 switch (uExitReason)
11827 {
11828 case VMX_EXIT_MTF:
11829 return vmxHCExitMtf(pVCpu, pVmxTransient);
11830
11831 /* Various events: */
11832 case VMX_EXIT_XCPT_OR_NMI:
11833 case VMX_EXIT_EXT_INT:
11834 case VMX_EXIT_TRIPLE_FAULT:
11835 case VMX_EXIT_INT_WINDOW:
11836 case VMX_EXIT_NMI_WINDOW:
11837 case VMX_EXIT_TASK_SWITCH:
11838 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11839 case VMX_EXIT_APIC_ACCESS:
11840 case VMX_EXIT_EPT_VIOLATION:
11841 case VMX_EXIT_EPT_MISCONFIG:
11842 case VMX_EXIT_PREEMPT_TIMER:
11843
11844 /* Instruction specific VM-exits: */
11845 case VMX_EXIT_CPUID:
11846 case VMX_EXIT_GETSEC:
11847 case VMX_EXIT_HLT:
11848 case VMX_EXIT_INVD:
11849 case VMX_EXIT_INVLPG:
11850 case VMX_EXIT_RDPMC:
11851 case VMX_EXIT_RDTSC:
11852 case VMX_EXIT_RSM:
11853 case VMX_EXIT_VMCALL:
11854 case VMX_EXIT_VMCLEAR:
11855 case VMX_EXIT_VMLAUNCH:
11856 case VMX_EXIT_VMPTRLD:
11857 case VMX_EXIT_VMPTRST:
11858 case VMX_EXIT_VMREAD:
11859 case VMX_EXIT_VMRESUME:
11860 case VMX_EXIT_VMWRITE:
11861 case VMX_EXIT_VMXOFF:
11862 case VMX_EXIT_VMXON:
11863 case VMX_EXIT_MOV_CRX:
11864 case VMX_EXIT_MOV_DRX:
11865 case VMX_EXIT_IO_INSTR:
11866 case VMX_EXIT_RDMSR:
11867 case VMX_EXIT_WRMSR:
11868 case VMX_EXIT_MWAIT:
11869 case VMX_EXIT_MONITOR:
11870 case VMX_EXIT_PAUSE:
11871 case VMX_EXIT_GDTR_IDTR_ACCESS:
11872 case VMX_EXIT_LDTR_TR_ACCESS:
11873 case VMX_EXIT_INVEPT:
11874 case VMX_EXIT_RDTSCP:
11875 case VMX_EXIT_INVVPID:
11876 case VMX_EXIT_WBINVD:
11877 case VMX_EXIT_XSETBV:
11878 case VMX_EXIT_RDRAND:
11879 case VMX_EXIT_INVPCID:
11880 case VMX_EXIT_VMFUNC:
11881 case VMX_EXIT_RDSEED:
11882 case VMX_EXIT_XSAVES:
11883 case VMX_EXIT_XRSTORS:
11884 {
11885 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11886 AssertRCReturn(rc, rc);
11887 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11888 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11889 return VINF_EM_DBG_STEPPED;
11890 break;
11891 }
11892
11893 /* Errors and unexpected events: */
11894 case VMX_EXIT_INIT_SIGNAL:
11895 case VMX_EXIT_SIPI:
11896 case VMX_EXIT_IO_SMI:
11897 case VMX_EXIT_SMI:
11898 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11899 case VMX_EXIT_ERR_MSR_LOAD:
11900 case VMX_EXIT_ERR_MACHINE_CHECK:
11901 case VMX_EXIT_PML_FULL:
11902 case VMX_EXIT_VIRTUALIZED_EOI:
11903            case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault-like, so I guess we must process it? */
11904 break;
11905
11906 default:
11907 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11908 break;
11909 }
11910 }
11911
11912 /*
11913 * Check for debugger event breakpoints and dtrace probes.
11914 */
11915 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11916 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11917 {
11918 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11919 if (rcStrict != VINF_SUCCESS)
11920 return rcStrict;
11921 }
11922
11923 /*
11924 * Normal processing.
11925 */
11926#ifdef HMVMX_USE_FUNCTION_TABLE
11927 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11928#else
11929 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11930#endif
11931}
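
/*
 * Editor's sketch (hypothetical, simplified): the debug run loop owning the
 * VMXRUNDBGSTATE is expected to call vmxHCRunDebugHandleExit() once per VM-exit
 * and to leave the loop on anything other than VINF_SUCCESS, along these lines:
 *
 *   for (;;)
 *   {
 *       // ... apply the debug state, run the guest, fetch the exit reason ...
 *       VBOXSTRICTRC rcStrict = vmxHCRunDebugHandleExit(pVCpu, pVmxTransient, &DbgState);
 *       if (rcStrict != VINF_SUCCESS)  // VINF_EM_DBG_STEPPED, DBGF events, errors, ...
 *           break;
 *   }
 *
 * The real loop in this template naturally does considerably more (state
 * export/import, event injection, TLB handling), so treat this purely as
 * orientation.
 */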
11932
11933/** @} */