VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@ 93132

Last change on this file since 93132 was 93132, checked in by vboxsync, 3 years ago

VMM,{HMVMXR0.cpp,VMXTemplate.cpp.h}: Make use of the VMX template code in HM, getting rid of the temporary code duplication, bugref:10136

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 428.5 KB
 
1/* $Id: VMXAllTemplate.cpp.h 93132 2022-01-06 12:38:02Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.alldomusa.eu.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Defined Constants And Macros *
21*********************************************************************************************************************************/
22#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
23# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW macros is missing"
24#endif
25
26
27#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
28# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW macros is missing"
29#endif
30
31
32/** Use the function table. */
33#define HMVMX_USE_FUNCTION_TABLE
34
35/** Determine which tagged-TLB flush handler to use. */
36#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
37#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
38#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
39#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
40
41/**
42 * Flags to skip redundant reads of some common VMCS fields that are not part of
43 * the guest-CPU or VCPU state but are needed while handling VM-exits.
44 */
45#define HMVMX_READ_IDT_VECTORING_INFO RT_BIT_32(0)
46#define HMVMX_READ_IDT_VECTORING_ERROR_CODE RT_BIT_32(1)
47#define HMVMX_READ_EXIT_QUALIFICATION RT_BIT_32(2)
48#define HMVMX_READ_EXIT_INSTR_LEN RT_BIT_32(3)
49#define HMVMX_READ_EXIT_INTERRUPTION_INFO RT_BIT_32(4)
50#define HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE RT_BIT_32(5)
51#define HMVMX_READ_EXIT_INSTR_INFO RT_BIT_32(6)
52#define HMVMX_READ_GUEST_LINEAR_ADDR RT_BIT_32(7)
53#define HMVMX_READ_GUEST_PHYSICAL_ADDR RT_BIT_32(8)
54#define HMVMX_READ_GUEST_PENDING_DBG_XCPTS RT_BIT_32(9)
55
56/** All the VMCS fields required for processing of exception/NMI VM-exits. */
57#define HMVMX_READ_XCPT_INFO ( HMVMX_READ_EXIT_INTERRUPTION_INFO \
58 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE \
59 | HMVMX_READ_EXIT_INSTR_LEN \
60 | HMVMX_READ_IDT_VECTORING_INFO \
61 | HMVMX_READ_IDT_VECTORING_ERROR_CODE)
62
63/** Assert that all the given fields have been read from the VMCS. */
64#ifdef VBOX_STRICT
65# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
66 do { \
67 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
68 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
69 } while (0)
70#else
71# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
72#endif
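/*
 * Illustrative usage sketch (editorial addition, not part of the upstream file;
 * assumes pVCpu and pVmxTransient are in scope in a VM-exit handler): the fields are
 * read once via the vmxHCReadXxxVmcs helpers defined below, which set these flags,
 * and can then be asserted before use:
 *
 *      vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
 *      vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
 *      HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN);
 *      uint64_t const uExitQual   = pVmxTransient->uExitQual;
 *      uint32_t const cbExitInstr = pVmxTransient->cbExitInstr;
 */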
73
74/**
75 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
76 * guest using hardware-assisted VMX.
77 *
78 * This excludes state like GPRs (other than RSP) which are always swapped
79 * and restored across the world-switch, and also registers like the EFER
80 * MSR which cannot be modified by the guest without causing a VM-exit.
81 */
82#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
83 | CPUMCTX_EXTRN_RFLAGS \
84 | CPUMCTX_EXTRN_RSP \
85 | CPUMCTX_EXTRN_SREG_MASK \
86 | CPUMCTX_EXTRN_TABLE_MASK \
87 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
88 | CPUMCTX_EXTRN_SYSCALL_MSRS \
89 | CPUMCTX_EXTRN_SYSENTER_MSRS \
90 | CPUMCTX_EXTRN_TSC_AUX \
91 | CPUMCTX_EXTRN_OTHER_MSRS \
92 | CPUMCTX_EXTRN_CR0 \
93 | CPUMCTX_EXTRN_CR3 \
94 | CPUMCTX_EXTRN_CR4 \
95 | CPUMCTX_EXTRN_DR7 \
96 | CPUMCTX_EXTRN_HWVIRT \
97 | CPUMCTX_EXTRN_INHIBIT_INT \
98 | CPUMCTX_EXTRN_INHIBIT_NMI)
99
100/**
101 * Exception bitmap mask for real-mode guests (real-on-v86).
102 *
103 * We need to intercept all exceptions manually, except:
104 * - \#AC and \#DB, which are always intercepted to prevent the CPU from
105 * deadlocking due to bugs in Intel CPUs.
106 * - \#PF, which need not be intercepted even in real-mode if we have nested
107 * paging support.
108 */
109#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
110 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
111 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
112 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
113 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
114 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
115 | RT_BIT(X86_XCPT_XF))
116
117/** Maximum VM-instruction error number. */
118#define HMVMX_INSTR_ERROR_MAX 28
119
120/** Profiling macro. */
121#ifdef HM_PROFILE_EXIT_DISPATCH
122# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
123# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
124#else
125# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
126# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
127#endif
128
129#ifndef IN_NEM_DARWIN
130/** Assert that preemption is disabled or covered by thread-context hooks. */
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
132 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
133
134/** Assert that we haven't migrated CPUs when thread-context hooks are not
135 * used. */
136# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
137 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
138 ("Illegal migration! Entered on CPU %u Current %u\n", \
139 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
140#else
141# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
142# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
143#endif
144
145/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
146 * context. */
147#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
148 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
149 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
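/*
 * Usage sketch (illustrative, assuming the relevant state was imported earlier via
 * vmxHCImportGuestState()): a code path about to consume CR0 and RFLAGS from the
 * guest-CPU context verifies that they are no longer marked external:
 *
 *      HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS);
 */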
150
151/** Log the VM-exit reason with an easily visible marker to identify it in a
152 * potential sea of logging data. */
153#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
154 do { \
155 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
156 HMGetVmxExitName(a_uExitReason))); \
157 } while (0) \
158
159
160/*********************************************************************************************************************************
161* Structures and Typedefs *
162*********************************************************************************************************************************/
163/**
164 * Memory operand read or write access.
165 */
166typedef enum VMXMEMACCESS
167{
168 VMXMEMACCESS_READ = 0,
169 VMXMEMACCESS_WRITE = 1
170} VMXMEMACCESS;
171
172
173/**
174 * VMX VM-exit handler.
175 *
176 * @returns Strict VBox status code (i.e. informational status codes too).
177 * @param pVCpu The cross context virtual CPU structure.
178 * @param pVmxTransient The VMX-transient structure.
179 */
180#ifndef HMVMX_USE_FUNCTION_TABLE
181typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
182#else
183typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
184/** Pointer to VM-exit handler. */
185typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
186#endif
187
188/**
189 * VMX VM-exit handler, non-strict status code.
190 *
191 * This is generally the same as FNVMXEXITHANDLER; the NSRC (non-strict return code) part is informational only.
192 *
193 * @returns VBox status code, no informational status code returned.
194 * @param pVCpu The cross context virtual CPU structure.
195 * @param pVmxTransient The VMX-transient structure.
196 *
197 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
198 * use of that status code will be replaced with VINF_EM_SOMETHING
199 * later when switching over to IEM.
200 */
201#ifndef HMVMX_USE_FUNCTION_TABLE
202typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203#else
204typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
205#endif
206
207
208/*********************************************************************************************************************************
209* Internal Functions *
210*********************************************************************************************************************************/
211#ifndef HMVMX_USE_FUNCTION_TABLE
212DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
213# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
214# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
215#else
216# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
217# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
218#endif
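/*
 * Illustrative skeleton (editorial addition; the body shown here is hypothetical)
 * of how the VM-exit handlers declared below are defined using these macros:
 *
 *      HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
 *      {
 *          vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
 *          // ... advance the guest RIP past HLT and decide whether to halt ...
 *          return VINF_EM_HALT;
 *      }
 */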
219#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
220DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
221#endif
222
223static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
224
225/** @name VM-exit handler prototypes.
226 * @{
227 */
228static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
229static FNVMXEXITHANDLER vmxHCExitExtInt;
230static FNVMXEXITHANDLER vmxHCExitTripleFault;
231static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
232static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
233static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
234static FNVMXEXITHANDLER vmxHCExitCpuid;
235static FNVMXEXITHANDLER vmxHCExitGetsec;
236static FNVMXEXITHANDLER vmxHCExitHlt;
237static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
238static FNVMXEXITHANDLER vmxHCExitInvlpg;
239static FNVMXEXITHANDLER vmxHCExitRdpmc;
240static FNVMXEXITHANDLER vmxHCExitVmcall;
241#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
242static FNVMXEXITHANDLER vmxHCExitVmclear;
243static FNVMXEXITHANDLER vmxHCExitVmlaunch;
244static FNVMXEXITHANDLER vmxHCExitVmptrld;
245static FNVMXEXITHANDLER vmxHCExitVmptrst;
246static FNVMXEXITHANDLER vmxHCExitVmread;
247static FNVMXEXITHANDLER vmxHCExitVmresume;
248static FNVMXEXITHANDLER vmxHCExitVmwrite;
249static FNVMXEXITHANDLER vmxHCExitVmxoff;
250static FNVMXEXITHANDLER vmxHCExitVmxon;
251static FNVMXEXITHANDLER vmxHCExitInvvpid;
252#endif
253static FNVMXEXITHANDLER vmxHCExitRdtsc;
254static FNVMXEXITHANDLER vmxHCExitMovCRx;
255static FNVMXEXITHANDLER vmxHCExitMovDRx;
256static FNVMXEXITHANDLER vmxHCExitIoInstr;
257static FNVMXEXITHANDLER vmxHCExitRdmsr;
258static FNVMXEXITHANDLER vmxHCExitWrmsr;
259static FNVMXEXITHANDLER vmxHCExitMwait;
260static FNVMXEXITHANDLER vmxHCExitMtf;
261static FNVMXEXITHANDLER vmxHCExitMonitor;
262static FNVMXEXITHANDLER vmxHCExitPause;
263static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
264static FNVMXEXITHANDLER vmxHCExitApicAccess;
265static FNVMXEXITHANDLER vmxHCExitEptViolation;
266static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
267static FNVMXEXITHANDLER vmxHCExitRdtscp;
268static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
269static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
270static FNVMXEXITHANDLER vmxHCExitXsetbv;
271static FNVMXEXITHANDLER vmxHCExitInvpcid;
272static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
273static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
274static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
275/** @} */
276
277#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
278/** @name Nested-guest VM-exit handler prototypes.
279 * @{
280 */
281static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
282static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
283static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
284static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
285static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
286static FNVMXEXITHANDLER vmxHCExitHltNested;
287static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
288static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
289static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
290static FNVMXEXITHANDLER vmxHCExitRdtscNested;
291static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
292static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
293static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
294static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
295static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
296static FNVMXEXITHANDLER vmxHCExitMwaitNested;
297static FNVMXEXITHANDLER vmxHCExitMtfNested;
298static FNVMXEXITHANDLER vmxHCExitMonitorNested;
299static FNVMXEXITHANDLER vmxHCExitPauseNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
301static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
302static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
303static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
304static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
305static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
306static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
307static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
308static FNVMXEXITHANDLER vmxHCExitInstrNested;
309static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
310/** @} */
311#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
312
313
314/*********************************************************************************************************************************
315* Global Variables *
316*********************************************************************************************************************************/
317#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
318/**
319 * Array of all VMCS fields.
320 * Any fields added to the VT-x spec. should be added here.
321 *
322 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
323 * of nested-guests.
324 */
325static const uint32_t g_aVmcsFields[] =
326{
327 /* 16-bit control fields. */
328 VMX_VMCS16_VPID,
329 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
330 VMX_VMCS16_EPTP_INDEX,
331
332 /* 16-bit guest-state fields. */
333 VMX_VMCS16_GUEST_ES_SEL,
334 VMX_VMCS16_GUEST_CS_SEL,
335 VMX_VMCS16_GUEST_SS_SEL,
336 VMX_VMCS16_GUEST_DS_SEL,
337 VMX_VMCS16_GUEST_FS_SEL,
338 VMX_VMCS16_GUEST_GS_SEL,
339 VMX_VMCS16_GUEST_LDTR_SEL,
340 VMX_VMCS16_GUEST_TR_SEL,
341 VMX_VMCS16_GUEST_INTR_STATUS,
342 VMX_VMCS16_GUEST_PML_INDEX,
343
344 /* 16-bit host-state fields. */
345 VMX_VMCS16_HOST_ES_SEL,
346 VMX_VMCS16_HOST_CS_SEL,
347 VMX_VMCS16_HOST_SS_SEL,
348 VMX_VMCS16_HOST_DS_SEL,
349 VMX_VMCS16_HOST_FS_SEL,
350 VMX_VMCS16_HOST_GS_SEL,
351 VMX_VMCS16_HOST_TR_SEL,
352
353 /* 64-bit control fields. */
354 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
355 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
356 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
357 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
358 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
359 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
360 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
361 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
362 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
363 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
364 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
365 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
366 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
367 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
368 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
369 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
370 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
371 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
372 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
373 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
374 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
375 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
376 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
377 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
378 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
379 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
380 VMX_VMCS64_CTRL_EPTP_FULL,
381 VMX_VMCS64_CTRL_EPTP_HIGH,
382 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
383 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
384 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
385 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
386 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
387 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
388 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
389 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
390 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
391 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
392 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
393 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
394 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
395 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
396 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
397 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
398 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
399 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
400 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
401 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
402 VMX_VMCS64_CTRL_SPPTP_FULL,
403 VMX_VMCS64_CTRL_SPPTP_HIGH,
404 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
405 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
406 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
407 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
408 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
409 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
410
411 /* 64-bit read-only data fields. */
412 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
413 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
414
415 /* 64-bit guest-state fields. */
416 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
417 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
418 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
419 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
420 VMX_VMCS64_GUEST_PAT_FULL,
421 VMX_VMCS64_GUEST_PAT_HIGH,
422 VMX_VMCS64_GUEST_EFER_FULL,
423 VMX_VMCS64_GUEST_EFER_HIGH,
424 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
425 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
426 VMX_VMCS64_GUEST_PDPTE0_FULL,
427 VMX_VMCS64_GUEST_PDPTE0_HIGH,
428 VMX_VMCS64_GUEST_PDPTE1_FULL,
429 VMX_VMCS64_GUEST_PDPTE1_HIGH,
430 VMX_VMCS64_GUEST_PDPTE2_FULL,
431 VMX_VMCS64_GUEST_PDPTE2_HIGH,
432 VMX_VMCS64_GUEST_PDPTE3_FULL,
433 VMX_VMCS64_GUEST_PDPTE3_HIGH,
434 VMX_VMCS64_GUEST_BNDCFGS_FULL,
435 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
436 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
437 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
438 VMX_VMCS64_GUEST_PKRS_FULL,
439 VMX_VMCS64_GUEST_PKRS_HIGH,
440
441 /* 64-bit host-state fields. */
442 VMX_VMCS64_HOST_PAT_FULL,
443 VMX_VMCS64_HOST_PAT_HIGH,
444 VMX_VMCS64_HOST_EFER_FULL,
445 VMX_VMCS64_HOST_EFER_HIGH,
446 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
447 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
448 VMX_VMCS64_HOST_PKRS_FULL,
449 VMX_VMCS64_HOST_PKRS_HIGH,
450
451 /* 32-bit control fields. */
452 VMX_VMCS32_CTRL_PIN_EXEC,
453 VMX_VMCS32_CTRL_PROC_EXEC,
454 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
455 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
456 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
457 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
458 VMX_VMCS32_CTRL_EXIT,
459 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
460 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
461 VMX_VMCS32_CTRL_ENTRY,
462 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
463 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
464 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
465 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
466 VMX_VMCS32_CTRL_TPR_THRESHOLD,
467 VMX_VMCS32_CTRL_PROC_EXEC2,
468 VMX_VMCS32_CTRL_PLE_GAP,
469 VMX_VMCS32_CTRL_PLE_WINDOW,
470
471 /* 32-bit read-only data fields. */
472 VMX_VMCS32_RO_VM_INSTR_ERROR,
473 VMX_VMCS32_RO_EXIT_REASON,
474 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
475 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
476 VMX_VMCS32_RO_IDT_VECTORING_INFO,
477 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
478 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
479 VMX_VMCS32_RO_EXIT_INSTR_INFO,
480
481 /* 32-bit guest-state fields. */
482 VMX_VMCS32_GUEST_ES_LIMIT,
483 VMX_VMCS32_GUEST_CS_LIMIT,
484 VMX_VMCS32_GUEST_SS_LIMIT,
485 VMX_VMCS32_GUEST_DS_LIMIT,
486 VMX_VMCS32_GUEST_FS_LIMIT,
487 VMX_VMCS32_GUEST_GS_LIMIT,
488 VMX_VMCS32_GUEST_LDTR_LIMIT,
489 VMX_VMCS32_GUEST_TR_LIMIT,
490 VMX_VMCS32_GUEST_GDTR_LIMIT,
491 VMX_VMCS32_GUEST_IDTR_LIMIT,
492 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
493 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
494 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
495 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
496 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
497 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
498 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
499 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
500 VMX_VMCS32_GUEST_INT_STATE,
501 VMX_VMCS32_GUEST_ACTIVITY_STATE,
502 VMX_VMCS32_GUEST_SMBASE,
503 VMX_VMCS32_GUEST_SYSENTER_CS,
504 VMX_VMCS32_PREEMPT_TIMER_VALUE,
505
506 /* 32-bit host-state fields. */
507 VMX_VMCS32_HOST_SYSENTER_CS,
508
509 /* Natural-width control fields. */
510 VMX_VMCS_CTRL_CR0_MASK,
511 VMX_VMCS_CTRL_CR4_MASK,
512 VMX_VMCS_CTRL_CR0_READ_SHADOW,
513 VMX_VMCS_CTRL_CR4_READ_SHADOW,
514 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
515 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
516 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
517 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
518
519 /* Natural-width read-only data fields. */
520 VMX_VMCS_RO_EXIT_QUALIFICATION,
521 VMX_VMCS_RO_IO_RCX,
522 VMX_VMCS_RO_IO_RSI,
523 VMX_VMCS_RO_IO_RDI,
524 VMX_VMCS_RO_IO_RIP,
525 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
526
527 /* Natural-width guest-state fields. */
528 VMX_VMCS_GUEST_CR0,
529 VMX_VMCS_GUEST_CR3,
530 VMX_VMCS_GUEST_CR4,
531 VMX_VMCS_GUEST_ES_BASE,
532 VMX_VMCS_GUEST_CS_BASE,
533 VMX_VMCS_GUEST_SS_BASE,
534 VMX_VMCS_GUEST_DS_BASE,
535 VMX_VMCS_GUEST_FS_BASE,
536 VMX_VMCS_GUEST_GS_BASE,
537 VMX_VMCS_GUEST_LDTR_BASE,
538 VMX_VMCS_GUEST_TR_BASE,
539 VMX_VMCS_GUEST_GDTR_BASE,
540 VMX_VMCS_GUEST_IDTR_BASE,
541 VMX_VMCS_GUEST_DR7,
542 VMX_VMCS_GUEST_RSP,
543 VMX_VMCS_GUEST_RIP,
544 VMX_VMCS_GUEST_RFLAGS,
545 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
546 VMX_VMCS_GUEST_SYSENTER_ESP,
547 VMX_VMCS_GUEST_SYSENTER_EIP,
548 VMX_VMCS_GUEST_S_CET,
549 VMX_VMCS_GUEST_SSP,
550 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
551
552 /* Natural-width host-state fields. */
553 VMX_VMCS_HOST_CR0,
554 VMX_VMCS_HOST_CR3,
555 VMX_VMCS_HOST_CR4,
556 VMX_VMCS_HOST_FS_BASE,
557 VMX_VMCS_HOST_GS_BASE,
558 VMX_VMCS_HOST_TR_BASE,
559 VMX_VMCS_HOST_GDTR_BASE,
560 VMX_VMCS_HOST_IDTR_BASE,
561 VMX_VMCS_HOST_SYSENTER_ESP,
562 VMX_VMCS_HOST_SYSENTER_EIP,
563 VMX_VMCS_HOST_RSP,
564 VMX_VMCS_HOST_RIP,
565 VMX_VMCS_HOST_S_CET,
566 VMX_VMCS_HOST_SSP,
567 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
568};
569#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
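/*
 * Illustrative sketch (editorial addition; the actual setup code lives elsewhere):
 * deriving the shadow VMCS fields amounts to walking the table above and keeping
 * only those fields the CPU supports and that we choose to shadow, e.g.:
 *
 *      for (uint32_t i = 0; i < RT_ELEMENTS(g_aVmcsFields); i++)
 *      {
 *          uint32_t const uVmcsField = g_aVmcsFields[i];
 *          // ... filter out unsupported/unshadowed fields and add the rest to the
 *          //     VMREAD/VMWRITE bitmaps ...
 *      }
 */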
570
571#ifdef VBOX_STRICT
572static const uint32_t g_aVmcsSegBase[] =
573{
574 VMX_VMCS_GUEST_ES_BASE,
575 VMX_VMCS_GUEST_CS_BASE,
576 VMX_VMCS_GUEST_SS_BASE,
577 VMX_VMCS_GUEST_DS_BASE,
578 VMX_VMCS_GUEST_FS_BASE,
579 VMX_VMCS_GUEST_GS_BASE
580};
581static const uint32_t g_aVmcsSegSel[] =
582{
583 VMX_VMCS16_GUEST_ES_SEL,
584 VMX_VMCS16_GUEST_CS_SEL,
585 VMX_VMCS16_GUEST_SS_SEL,
586 VMX_VMCS16_GUEST_DS_SEL,
587 VMX_VMCS16_GUEST_FS_SEL,
588 VMX_VMCS16_GUEST_GS_SEL
589};
590static const uint32_t g_aVmcsSegLimit[] =
591{
592 VMX_VMCS32_GUEST_ES_LIMIT,
593 VMX_VMCS32_GUEST_CS_LIMIT,
594 VMX_VMCS32_GUEST_SS_LIMIT,
595 VMX_VMCS32_GUEST_DS_LIMIT,
596 VMX_VMCS32_GUEST_FS_LIMIT,
597 VMX_VMCS32_GUEST_GS_LIMIT
598};
599static const uint32_t g_aVmcsSegAttr[] =
600{
601 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
602 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
603 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
604 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
605 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
606 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS
607};
608AssertCompile(RT_ELEMENTS(g_aVmcsSegSel) == X86_SREG_COUNT);
609AssertCompile(RT_ELEMENTS(g_aVmcsSegLimit) == X86_SREG_COUNT);
610AssertCompile(RT_ELEMENTS(g_aVmcsSegBase) == X86_SREG_COUNT);
611AssertCompile(RT_ELEMENTS(g_aVmcsSegAttr) == X86_SREG_COUNT);
612#endif /* VBOX_STRICT */
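/*
 * Usage sketch (illustrative): the four arrays above are indexed by X86_SREG_XXX so
 * that strict-build checks can read any guest segment register generically, for a
 * given iSegReg in [0, X86_SREG_COUNT):
 *
 *      uint64_t u64Base;
 *      uint32_t u32Attr;
 *      int rc = VMX_VMCS_READ_NW(pVCpu, g_aVmcsSegBase[iSegReg], &u64Base);
 *      rc    |= VMX_VMCS_READ_32(pVCpu, g_aVmcsSegAttr[iSegReg], &u32Attr);
 *      AssertRC(rc);
 */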
613
614#ifdef HMVMX_USE_FUNCTION_TABLE
615/**
616 * VMX_EXIT dispatch table.
617 */
618static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
619{
620 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
621 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
622 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
623 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
624 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
625 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
626 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
627 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
628 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
629 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
630 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
631 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
632 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
633 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
634 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
635 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
636 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
637 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
638 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
639#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
640 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
641 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
642 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
643 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
644 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
645 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
646 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
647 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
648 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
649#else
650 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
651 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
652 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
653 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
654 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
655 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
656 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
657 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
658 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
659#endif
660 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
661 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
662 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
663 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
664 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
665 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
666 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
667 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
668 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
669 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
670 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
671 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
672 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
673 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
674 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
675 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
676 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
677 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
678 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
679 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
680 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
681 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
682 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
683 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
684 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
685#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
686 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
687#else
688 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
689#endif
690 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
691 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
692 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
693 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
694 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
695 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
696 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
697 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
698 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
699 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
700 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
701 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
702 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
703 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
704 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
705 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
706};
707#endif /* HMVMX_USE_FUNCTION_TABLE */
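/*
 * Dispatch sketch (illustrative, assuming HMVMX_USE_FUNCTION_TABLE): a common exit
 * path along these lines indexes the table with the bounds-checked basic exit reason:
 *
 *      VBOXSTRICTRC   rcStrict;
 *      uint32_t const uExitReason = pVmxTransient->uExitReason;
 *      if (RT_LIKELY(uExitReason <= VMX_EXIT_MAX))
 *          rcStrict = g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
 *      else
 *          rcStrict = vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
 */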
708
709#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
710static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
711{
712 /* 0 */ "(Not Used)",
713 /* 1 */ "VMCALL executed in VMX root operation.",
714 /* 2 */ "VMCLEAR with invalid physical address.",
715 /* 3 */ "VMCLEAR with VMXON pointer.",
716 /* 4 */ "VMLAUNCH with non-clear VMCS.",
717 /* 5 */ "VMRESUME with non-launched VMCS.",
718 /* 6 */ "VMRESUME after VMXOFF",
719 /* 7 */ "VM-entry with invalid control fields.",
720 /* 8 */ "VM-entry with invalid host state fields.",
721 /* 9 */ "VMPTRLD with invalid physical address.",
722 /* 10 */ "VMPTRLD with VMXON pointer.",
723 /* 11 */ "VMPTRLD with incorrect revision identifier.",
724 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
725 /* 13 */ "VMWRITE to read-only VMCS component.",
726 /* 14 */ "(Not Used)",
727 /* 15 */ "VMXON executed in VMX root operation.",
728 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
729 /* 17 */ "VM-entry with non-launched executing VMCS.",
730 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
731 /* 19 */ "VMCALL with non-clear VMCS.",
732 /* 20 */ "VMCALL with invalid VM-exit control fields.",
733 /* 21 */ "(Not Used)",
734 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
735 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
736 /* 24 */ "VMCALL with invalid SMM-monitor features.",
737 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
738 /* 26 */ "VM-entry with events blocked by MOV SS.",
739 /* 27 */ "(Not Used)",
740 /* 28 */ "Invalid operand to INVEPT/INVVPID."
741};
742#endif /* VBOX_STRICT && LOG_ENABLED */
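/*
 * Logging sketch (illustrative): a failed VMX instruction can be reported using the
 * table above together with the VM-instruction error VMCS field:
 *
 *      uint32_t uInstrError = 0;
 *      int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_VM_INSTR_ERROR, &uInstrError);
 *      AssertRC(rc);
 *      const char *pszError = uInstrError <= HMVMX_INSTR_ERROR_MAX
 *                           ? g_apszVmxInstrErrors[uInstrError] : "Unknown error";
 *      Log4(("VM-instruction error %u: %s\n", uInstrError, pszError));
 */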
743
744
745/**
746 * Gets the CR0 guest/host mask.
747 *
748 * These bits typically do not change through the lifetime of a VM. Any bit set in
749 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
750 * by the guest.
751 *
752 * @returns The CR0 guest/host mask.
753 * @param pVCpu The cross context virtual CPU structure.
754 */
755static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
756{
757 /*
758 * Modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW) and
759 * to CR0 bits that we require for shadow paging (PG) by the guest must cause VM-exits.
760 *
761 * Furthermore, modifications to any bits that are reserved/unspecified currently
762 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
763 * when future CPUs specify and use currently reserved/unspecified bits.
764 */
765 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
766 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
767 * and @bugref{6944}. */
768 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
769 return ( X86_CR0_PE
770 | X86_CR0_NE
771 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
772 | X86_CR0_PG
773 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
774}
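/*
 * Commit sketch (illustrative; assumes a natural-width write wrapper VMX_VMCS_WRITE_NW
 * is provided by the including code, like the read wrapper used below): the returned
 * mask is typically written to the CR0 guest/host mask field of the current VMCS:
 *
 *      uint64_t const fCr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
 *      int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);
 *      AssertRC(rc);
 */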
775
776
777/**
778 * Gets the CR4 guest/host mask.
779 *
780 * These bits typically do not change through the lifetime of a VM. Any bit set in
781 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
782 * by the guest.
783 *
784 * @returns The CR4 guest/host mask.
785 * @param pVCpu The cross context virtual CPU structure.
786 */
787static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
788{
789 /*
790 * We construct a mask of all CR4 bits that the guest can modify without causing
791 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
792 * a VM-exit when the guest attempts to modify them while executing using
793 * hardware-assisted VMX.
794 *
795 * When a feature is not exposed to the guest (and may be present on the host),
796 * we want to intercept guest modifications to the bit so we can emulate proper
797 * behavior (e.g., #GP).
798 *
799 * Furthermore, only modifications to those bits that don't require immediate
800 * emulation are allowed. For example, PCIDE is excluded because its behavior
801 * depends on CR3, which might not always be the guest value while executing
802 * using hardware-assisted VMX.
803 */
804 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
805 bool const fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
806 bool const fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
807 bool const fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
808
809 /*
810 * Paranoia.
811 * Ensure features exposed to the guest are present on the host.
812 */
813 Assert(!fFsGsBase || pVM->cpum.ro.HostFeatures.fFsGsBase);
814 Assert(!fXSaveRstor || pVM->cpum.ro.HostFeatures.fXSaveRstor);
815 Assert(!fFxSaveRstor || pVM->cpum.ro.HostFeatures.fFxSaveRstor);
816
817 uint64_t const fGstMask = ( X86_CR4_PVI
818 | X86_CR4_TSD
819 | X86_CR4_DE
820 | X86_CR4_MCE
821 | X86_CR4_PCE
822 | X86_CR4_OSXMMEEXCPT
823 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
824 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
825 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0));
826 return ~fGstMask;
827}
828
829
830/**
831 * Adds one or more exceptions to the exception bitmap and commits it to the current
832 * VMCS.
833 *
834 * @param pVCpu The cross context virtual CPU structure.
835 * @param pVmxTransient The VMX-transient structure.
836 * @param uXcptMask The exception(s) to add.
837 */
838static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
839{
840 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
841 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
842 if ((uXcptBitmap & uXcptMask) != uXcptMask)
843 {
844 uXcptBitmap |= uXcptMask;
845 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
846 AssertRC(rc);
847 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
848 }
849}
850
851
852/**
853 * Adds an exception to the exception bitmap and commits it to the current VMCS.
854 *
855 * @param pVCpu The cross context virtual CPU structure.
856 * @param pVmxTransient The VMX-transient structure.
857 * @param uXcpt The exception to add.
858 */
859static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
860{
861 Assert(uXcpt <= X86_XCPT_LAST);
862 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
863}
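/*
 * Usage sketch (illustrative): intercepting \#GP for the current VMCS is a one-liner:
 *
 *      vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 */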
864
865
866/**
867 * Remove one or more exceptions from the exception bitmap and commits it to the
868 * current VMCS.
869 *
870 * This takes care of not removing the exception intercept if a nested-guest
871 * requires the exception to be intercepted.
872 *
873 * @returns VBox status code.
874 * @param pVCpu The cross context virtual CPU structure.
875 * @param pVmxTransient The VMX-transient structure.
876 * @param uXcptMask The exception(s) to remove.
877 */
878static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
879{
880 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
881 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
882 if (u32XcptBitmap & uXcptMask)
883 {
884#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
885 if (!pVmxTransient->fIsNestedGuest)
886 { /* likely */ }
887 else
888 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
889#endif
890#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
891 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
892 | RT_BIT(X86_XCPT_DE)
893 | RT_BIT(X86_XCPT_NM)
894 | RT_BIT(X86_XCPT_TS)
895 | RT_BIT(X86_XCPT_UD)
896 | RT_BIT(X86_XCPT_NP)
897 | RT_BIT(X86_XCPT_SS)
898 | RT_BIT(X86_XCPT_GP)
899 | RT_BIT(X86_XCPT_PF)
900 | RT_BIT(X86_XCPT_MF));
901#elif defined(HMVMX_ALWAYS_TRAP_PF)
902 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
903#endif
904 if (uXcptMask)
905 {
906 /* Validate we are not removing any essential exception intercepts. */
907#ifndef IN_NEM_DARWIN
908 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
909#else
910 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
911#endif
912 NOREF(pVCpu);
913 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
914 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
915
916 /* Remove it from the exception bitmap. */
917 u32XcptBitmap &= ~uXcptMask;
918
919 /* Commit and update the cache if necessary. */
920 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
921 {
922 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
923 AssertRC(rc);
924 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
925 }
926 }
927 }
928 return VINF_SUCCESS;
929}
930
931
932/**
933 * Remove an exceptions from the exception bitmap and commits it to the current
934 * VMCS.
935 *
936 * @returns VBox status code.
937 * @param pVCpu The cross context virtual CPU structure.
938 * @param pVmxTransient The VMX-transient structure.
939 * @param uXcpt The exception to remove.
940 */
941static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
942{
943 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
944}
945
946
947#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
948/**
949 * Loads the shadow VMCS specified by the VMCS info. object.
950 *
951 * @returns VBox status code.
952 * @param pVmcsInfo The VMCS info. object.
953 *
954 * @remarks Can be called with interrupts disabled.
955 */
956static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
957{
958 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
959 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
960
961 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
962 if (RT_SUCCESS(rc))
963 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
964 return rc;
965}
966
967
968/**
969 * Clears the shadow VMCS specified by the VMCS info. object.
970 *
971 * @returns VBox status code.
972 * @param pVmcsInfo The VMCS info. object.
973 *
974 * @remarks Can be called with interrupts disabled.
975 */
976static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
977{
978 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
979 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
980
981 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
982 if (RT_SUCCESS(rc))
983 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
984 return rc;
985}
986
987
988/**
989 * Switches from and to the specified VMCSes.
990 *
991 * @returns VBox status code.
992 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
993 * @param pVmcsInfoTo The VMCS info. object we are switching to.
994 *
995 * @remarks Called with interrupts disabled.
996 */
997static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
998{
999 /*
1000 * Clear the VMCS we are switching out if it has not already been cleared.
1001 * This will sync any CPU internal data back to the VMCS.
1002 */
1003 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1004 {
1005 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
1006 if (RT_SUCCESS(rc))
1007 {
1008 /*
1009 * The shadow VMCS, if any, would not be active at this point since we
1010 * would have cleared it while importing the virtual hardware-virtualization
1011 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
1012 * clear the shadow VMCS here, just assert for safety.
1013 */
1014 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
1015 }
1016 else
1017 return rc;
1018 }
1019
1020 /*
1021 * Clear the VMCS we are switching to if it has not already been cleared.
1022 * This will initialize the VMCS launch state to "clear" required for loading it.
1023 *
1024 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1025 */
1026 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1027 {
1028 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1029 if (RT_SUCCESS(rc))
1030 { /* likely */ }
1031 else
1032 return rc;
1033 }
1034
1035 /*
1036 * Finally, load the VMCS we are switching to.
1037 */
1038 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1039}
1040
1041
1042/**
1043 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1044 * caller.
1045 *
1046 * @returns VBox status code.
1047 * @param pVCpu The cross context virtual CPU structure.
1048 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1049 * true) or guest VMCS (pass false).
1050 */
1051static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1052{
1053 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1054 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1055
1056 PVMXVMCSINFO pVmcsInfoFrom;
1057 PVMXVMCSINFO pVmcsInfoTo;
1058 if (fSwitchToNstGstVmcs)
1059 {
1060 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1061 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1062 }
1063 else
1064 {
1065 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1066 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1067 }
1068
1069 /*
1070 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1071 * preemption hook code path acquires the current VMCS.
1072 */
1073 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1074
1075 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1076 if (RT_SUCCESS(rc))
1077 {
1078 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1079 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1080
1081 /*
1082 * If we are switching to a VMCS that was executed on a different host CPU or was
1083 * never executed before, flag that we need to export the host state before executing
1084 * guest/nested-guest code using hardware-assisted VMX.
1085 *
1086 * This could probably be done in a preemptible context since the preemption hook
1087 * will flag the necessary change in host context. However, since preemption is
1088 * already disabled and to avoid making assumptions about host specific code in
1089 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1090 * disabled.
1091 */
1092 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1093 { /* likely */ }
1094 else
1095 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1096
1097 ASMSetFlags(fEFlags);
1098
1099 /*
1100 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1101 * flag that we need to update the host MSR values there. Even if we decide in the
1102 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1103 * if its content differs, we would have to update the host MSRs anyway.
1104 */
1105 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1106 }
1107 else
1108 ASMSetFlags(fEFlags);
1109 return rc;
1110}
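/*
 * Usage sketch (illustrative, hypothetical call site): when emulation hands control
 * to the nested-guest (VMLAUNCH/VMRESUME), the caller switches to the nested-guest
 * VMCS, and switches back again when handling the nested VM-exit:
 *
 *      int rc = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, true);  // true = nested-guest VMCS
 *      AssertRCReturn(rc, rc);
 */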
1111#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1112
1113
1114#ifdef VBOX_STRICT
1115/**
1116 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1117 * transient structure.
1118 *
1119 * @param pVCpu The cross context virtual CPU structure.
1120 * @param pVmxTransient The VMX-transient structure.
1121 */
1122DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1123{
1124 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1125 AssertRC(rc);
1126}
1127
1128
1129/**
1130 * Reads the VM-entry exception error code field from the VMCS into
1131 * the VMX transient structure.
1132 *
1133 * @param pVCpu The cross context virtual CPU structure.
1134 * @param pVmxTransient The VMX-transient structure.
1135 */
1136DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1137{
1138 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1139 AssertRC(rc);
1140}
1141
1142
1143/**
1144 * Reads the VM-entry instruction length field from the VMCS into
1145 * the VMX transient structure.
1146 *
1147 * @param pVCpu The cross context virtual CPU structure.
1148 * @param pVmxTransient The VMX-transient structure.
1149 */
1150DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1151{
1152 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1153 AssertRC(rc);
1154}
1155#endif /* VBOX_STRICT */
1156
1157
1158/**
1159 * Reads the VM-exit interruption-information field from the VMCS into the VMX
1160 * transient structure.
1161 *
1162 * @param pVCpu The cross context virtual CPU structure.
1163 * @param pVmxTransient The VMX-transient structure.
1164 */
1165DECLINLINE(void) vmxHCReadExitIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1166{
1167 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1168 {
1169 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1170 AssertRC(rc);
1171 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO;
1172 }
1173}
1174
1175
1176/**
1177 * Reads the VM-exit interruption error code from the VMCS into the VMX
1178 * transient structure.
1179 *
1180 * @param pVCpu The cross context virtual CPU structure.
1181 * @param pVmxTransient The VMX-transient structure.
1182 */
1183DECLINLINE(void) vmxHCReadExitIntErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1184{
1185 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1186 {
1187 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1188 AssertRC(rc);
1189 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE;
1190 }
1191}
1192
1193
1194/**
1195 * Reads the VM-exit instruction length field from the VMCS into the VMX
1196 * transient structure.
1197 *
1198 * @param pVCpu The cross context virtual CPU structure.
1199 * @param pVmxTransient The VMX-transient structure.
1200 */
1201DECLINLINE(void) vmxHCReadExitInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1202{
1203 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1204 {
1205 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1206 AssertRC(rc);
1207 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
1208 }
1209}
1210
1211
1212/**
1213 * Reads the VM-exit instruction-information field from the VMCS into
1214 * the VMX transient structure.
1215 *
1216 * @param pVCpu The cross context virtual CPU structure.
1217 * @param pVmxTransient The VMX-transient structure.
1218 */
1219DECLINLINE(void) vmxHCReadExitInstrInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1220{
1221 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1222 {
1223 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1224 AssertRC(rc);
1225 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO;
1226 }
1227}
1228
1229
1230/**
1231 * Reads the Exit Qualification from the VMCS into the VMX transient structure.
1232 *
1233 * @param pVCpu The cross context virtual CPU structure.
1234 * @param pVmxTransient The VMX-transient structure.
1235 */
1236DECLINLINE(void) vmxHCReadExitQualVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1237{
1238 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1239 {
1240 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1241 AssertRC(rc);
1242 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
1243 }
1244}
1245
1246
1247/**
1248 * Reads the Guest-linear address from the VMCS into the VMX transient structure.
1249 *
1250 * @param pVCpu The cross context virtual CPU structure.
1251 * @param pVmxTransient The VMX-transient structure.
1252 */
1253DECLINLINE(void) vmxHCReadGuestLinearAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1254{
1255 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1256 {
1257 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1258 AssertRC(rc);
1259 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_LINEAR_ADDR;
1260 }
1261}
1262
1263
1264/**
1265 * Reads the Guest-physical address from the VMCS into the VMX transient structure.
1266 *
1267 * @param pVCpu The cross context virtual CPU structure.
1268 * @param pVmxTransient The VMX-transient structure.
1269 */
1270DECLINLINE(void) vmxHCReadGuestPhysicalAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1271{
1272 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1273 {
1274 int rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1275 AssertRC(rc);
1276 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PHYSICAL_ADDR;
1277 }
1278}
1279
1280#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1281/**
1282 * Reads the Guest pending-debug exceptions from the VMCS into the VMX transient
1283 * structure.
1284 *
1285 * @param pVCpu The cross context virtual CPU structure.
1286 * @param pVmxTransient The VMX-transient structure.
1287 */
1288DECLINLINE(void) vmxHCReadGuestPendingDbgXctps(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1289{
1290 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1291 {
1292 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1293 AssertRC(rc);
1294 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PENDING_DBG_XCPTS;
1295 }
1296}
1297#endif
1298
1299/**
1300 * Reads the IDT-vectoring information field from the VMCS into the VMX
1301 * transient structure.
1302 *
1303 * @param pVCpu The cross context virtual CPU structure.
1304 * @param pVmxTransient The VMX-transient structure.
1305 *
1306 * @remarks No-long-jump zone!!!
1307 */
1308DECLINLINE(void) vmxHCReadIdtVectoringInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1309{
1310 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1311 {
1312 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1313 AssertRC(rc);
1314 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO;
1315 }
1316}
1317
1318
1319/**
1320 * Reads the IDT-vectoring error code from the VMCS into the VMX
1321 * transient structure.
1322 *
1323 * @param pVCpu The cross context virtual CPU structure.
1324 * @param pVmxTransient The VMX-transient structure.
1325 */
1326DECLINLINE(void) vmxHCReadIdtVectoringErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1327{
1328 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1329 {
1330 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1331 AssertRC(rc);
1332 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE;
1333 }
1334}
1335
1336#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1337/**
1338 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1339 *
1340 * @param pVCpu The cross context virtual CPU structure.
1341 * @param pVmxTransient The VMX-transient structure.
1342 */
1343static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1344{
1345 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1346 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1347 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1348 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1349 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1350 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1351 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1352 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1353 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1354 AssertRC(rc);
1355 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1356 | HMVMX_READ_EXIT_INSTR_LEN
1357 | HMVMX_READ_EXIT_INSTR_INFO
1358 | HMVMX_READ_IDT_VECTORING_INFO
1359 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1360 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1361 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1362 | HMVMX_READ_GUEST_LINEAR_ADDR
1363 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1364}
1365#endif
1366
1367/**
1368 * Verifies that our cached values of the VMCS fields are all consistent with
1369 * what's actually present in the VMCS.
1370 *
1371 * @returns VBox status code.
1372 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1373 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1374 * VMCS content. HMCPU error-field is
1375 * updated, see VMX_VCI_XXX.
1376 * @param pVCpu The cross context virtual CPU structure.
1377 * @param pVmcsInfo The VMCS info. object.
1378 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1379 */
1380static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1381{
1382 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1383
1384 uint32_t u32Val;
1385 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1386 AssertRC(rc);
1387 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1388 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1389 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1390 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1391
1392 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1393 AssertRC(rc);
1394 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1395 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1396 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1397 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1398
1399 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1400 AssertRC(rc);
1401 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1402 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1403 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1404 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1405
1406 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1407 AssertRC(rc);
1408 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1409 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1410 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1411 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1412
1413 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1414 {
1415 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1416 AssertRC(rc);
1417 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1418 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1419 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1420 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1421 }
1422
1423 uint64_t u64Val;
1424 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1425 {
1426 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1427 AssertRC(rc);
1428 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1429 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1430 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1431 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1432 }
1433
1434 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1435 AssertRC(rc);
1436 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1437 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1438 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1439 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1440
1441 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1442 AssertRC(rc);
1443 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1444 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1445 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1446 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1447
1448 NOREF(pcszVmcs);
1449 return VINF_SUCCESS;
1450}
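/*
 * Illustrative caller sketch (hypothetical, not part of the template): a strict-build
 * sanity pass could validate the control caches right before VM-entry, e.g.:
 *
 *     int rcCheck = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, false); // fIsNstGstVmcs=false
 *     AssertRC(rcCheck);
 */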
1451
1452
1453/**
1454 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1455 * VMCS.
1456 *
1457 * This is typically required when the guest changes paging mode.
1458 *
1459 * @returns VBox status code.
1460 * @param pVCpu The cross context virtual CPU structure.
1461 * @param pVmxTransient The VMX-transient structure.
1462 *
1463 * @remarks Requires EFER.
1464 * @remarks No-long-jump zone!!!
1465 */
1466static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1467{
1468 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1469 {
1470 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1471 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1472
1473 /*
1474 * VM-entry controls.
1475 */
1476 {
1477 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1478 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1479
1480 /*
1481 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1482 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1483 *
1484 * For nested-guests, this is a mandatory VM-entry control. It's also
1485 * required because we do not want to leak host bits to the nested-guest.
1486 */
1487 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1488
1489 /*
1490 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1491 *
1492 * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1493 * required to get the nested-guest working with hardware-assisted VMX execution.
1494 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1495 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1496 * here rather than while merging the guest VMCS controls.
1497 */
1498 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1499 {
1500 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1501 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1502 }
1503 else
1504 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1505
1506 /*
1507 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1508 *
1509 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1510 * regardless of whether the nested-guest VMCS specifies it because we are free to
1511 * load whatever MSRs we require and we do not need to modify the guest visible copy
1512 * of the VM-entry MSR load area.
1513 */
1514 if ( g_fHmVmxSupportsVmcsEfer
1515#ifndef IN_NEM_DARWIN
1516 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1517#endif
1518 )
1519 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1520 else
1521 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1522
1523 /*
1524 * The following should -not- be set (since we're not in SMM mode):
1525 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1526 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1527 */
1528
1529 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1530 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1531
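/*
 * Worked example (illustrative values, not from real hardware): fVal starts out as
 * allowed0, i.e. every bit the CPU forces to 1, while fZap is allowed1, i.e. every bit
 * the CPU permits to be 1. If, say, fZap = 0xfffffffb (bit 2 may never be set) and we
 * accidentally set bit 2 in fVal, then (fVal & fZap) clears that bit and the comparison
 * below fails, flagging an unsupported feature combination.
 */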
1532 if ((fVal & fZap) == fVal)
1533 { /* likely */ }
1534 else
1535 {
1536 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1537 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1538 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1539 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1540 }
1541
1542 /* Commit it to the VMCS. */
1543 if (pVmcsInfo->u32EntryCtls != fVal)
1544 {
1545 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1546 AssertRC(rc);
1547 pVmcsInfo->u32EntryCtls = fVal;
1548 }
1549 }
1550
1551 /*
1552 * VM-exit controls.
1553 */
1554 {
1555 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1556 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1557
1558 /*
1559 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1560 * supported the 1-setting of this bit.
1561 *
1562 * For nested-guests, we set the "save debug controls" as the converse
1563 * "load debug controls" is mandatory for nested-guests anyway.
1564 */
1565 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1566
1567 /*
1568 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1569 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1570 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1571 * vmxHCExportHostMsrs().
1572 *
1573 * For nested-guests, we always set this bit as we do not support 32-bit
1574 * hosts.
1575 */
1576 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1577
1578#ifndef IN_NEM_DARWIN
1579 /*
1580 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1581 *
1582 * For nested-guests, we should use the "save IA32_EFER" control if we also
1583 * used the "load IA32_EFER" control while exporting VM-entry controls.
1584 */
1585 if ( g_fHmVmxSupportsVmcsEfer
1586 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1587 {
1588 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1589 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1590 }
1591#endif
1592
1593 /*
1594 * Enable saving of the VMX-preemption timer value on VM-exit.
1595 * For nested-guests, currently not exposed/used.
1596 */
1597 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1598 * the timer value. */
1599 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1600 {
1601 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1602 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1603 }
1604
1605 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1606 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1607
1608 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1609 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1610 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1611
1612 if ((fVal & fZap) == fVal)
1613 { /* likely */ }
1614 else
1615 {
1616 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1617 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1618 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1619 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1620 }
1621
1622 /* Commit it to the VMCS. */
1623 if (pVmcsInfo->u32ExitCtls != fVal)
1624 {
1625 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1626 AssertRC(rc);
1627 pVmcsInfo->u32ExitCtls = fVal;
1628 }
1629 }
1630
1631 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1632 }
1633 return VINF_SUCCESS;
1634}
1635
1636
1637/**
1638 * Sets the TPR threshold in the VMCS.
1639 *
1640 * @param pVCpu The cross context virtual CPU structure.
1641 * @param pVmcsInfo The VMCS info. object.
1642 * @param u32TprThreshold The TPR threshold (task-priority class only).
1643 */
1644DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1645{
1646 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1647 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1648 RT_NOREF(pVmcsInfo);
1649 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1650 AssertRC(rc);
1651}
1652
1653
1654/**
1655 * Exports the guest APIC TPR state into the VMCS.
1656 *
1657 * @param pVCpu The cross context virtual CPU structure.
1658 * @param pVmxTransient The VMX-transient structure.
1659 *
1660 * @remarks No-long-jump zone!!!
1661 */
1662static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1663{
1664 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1665 {
1666 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1667
1668 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1669 if (!pVmxTransient->fIsNestedGuest)
1670 {
1671 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1672 && APICIsEnabled(pVCpu))
1673 {
1674 /*
1675 * Setup TPR shadowing.
1676 */
1677 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1678 {
1679 bool fPendingIntr = false;
1680 uint8_t u8Tpr = 0;
1681 uint8_t u8PendingIntr = 0;
1682 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1683 AssertRC(rc);
1684
1685 /*
1686 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1687 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1688 * priority of the pending interrupt so we can deliver the interrupt. If there
1689 * are no interrupts pending, set threshold to 0 to not cause any
1690 * TPR-below-threshold VM-exits.
1691 */
1692 uint32_t u32TprThreshold = 0;
1693 if (fPendingIntr)
1694 {
1695 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1696 (which is the Task-Priority Class). */
1697 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1698 const uint8_t u8TprPriority = u8Tpr >> 4;
1699 if (u8PendingPriority <= u8TprPriority)
1700 u32TprThreshold = u8PendingPriority;
1701 }
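/*
 * Worked example (illustrative numbers): a pending interrupt with vector 0x61 has
 * priority class 6, and a guest TPR of 0x80 has priority class 8. Since 6 <= 8 the
 * interrupt is currently masked, so the threshold is set to 6; once the guest lowers
 * its TPR below 0x60 we get a TPR-below-threshold VM-exit and can deliver the
 * vector 0x61 interrupt.
 */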
1702
1703 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1704 }
1705 }
1706 }
1707 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1708 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1709 }
1710}
1711
1712
1713/**
1714 * Gets the guest interruptibility-state and updates related force-flags.
1715 *
1716 * @returns Guest's interruptibility-state.
1717 * @param pVCpu The cross context virtual CPU structure.
1718 *
1719 * @remarks No-long-jump zone!!!
1720 */
1721static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1722{
1723 /*
1724 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1725 */
1726 uint32_t fIntrState = 0;
1727 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1728 {
1729 /* If inhibition is active, RIP and RFLAGS should've been imported from the VMCS already. */
1730 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
1731
1732 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1733 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
1734 {
1735 if (pCtx->eflags.Bits.u1IF)
1736 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1737 else
1738 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1739 }
1740 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1741 {
1742 /*
1743 * We can clear the inhibit force flag as even if we go back to the recompiler
1744 * without executing guest code in VT-x, the flag's condition to be cleared is
1745 * met and thus the cleared state is correct.
1746 */
1747 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1748 }
1749 }
1750
1751 /*
1752 * Check if we should inhibit NMI delivery.
1753 */
1754 if (CPUMIsGuestNmiBlocking(pVCpu))
1755 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1756
1757 /*
1758 * Validate.
1759 */
1760#ifdef VBOX_STRICT
1761 /* We don't support block-by-SMI yet. */
1762 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1763
1764 /* Block-by-STI must not be set when interrupts are disabled. */
1765 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
1766 {
1767 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1768 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
1769 }
1770#endif
1771
1772 return fIntrState;
1773}
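/*
 * Illustrative usage sketch (hypothetical caller; the VMCS field name assumed here is the
 * one this template uses elsewhere for the guest interruptibility-state): the returned
 * value is typically exported to the VMCS before VM-entry, e.g.:
 *
 *     uint32_t const fIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
 *     int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
 *     AssertRC(rc);
 */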
1774
1775
1776/**
1777 * Exports the exception intercepts required for guest execution in the VMCS.
1778 *
1779 * @param pVCpu The cross context virtual CPU structure.
1780 * @param pVmxTransient The VMX-transient structure.
1781 *
1782 * @remarks No-long-jump zone!!!
1783 */
1784static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1785{
1786 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1787 {
1788 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1789 if ( !pVmxTransient->fIsNestedGuest
1790 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1791 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1792 else
1793 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1794
1795 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1796 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1797 }
1798}
1799
1800
1801/**
1802 * Exports the guest's RIP into the guest-state area in the VMCS.
1803 *
1804 * @param pVCpu The cross context virtual CPU structure.
1805 *
1806 * @remarks No-long-jump zone!!!
1807 */
1808static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1809{
1810 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1811 {
1812 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1813
1814 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1815 AssertRC(rc);
1816
1817 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1818 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1819 }
1820}
1821
1822
1823/**
1824 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1825 *
1826 * @param pVCpu The cross context virtual CPU structure.
1827 * @param pVmxTransient The VMX-transient structure.
1828 *
1829 * @remarks No-long-jump zone!!!
1830 */
1831static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1832{
1833 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1834 {
1835 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1836
1837 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
1838 Let us assert it as such and use 32-bit VMWRITE. */
1839 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
1840 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
1841 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
1842 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
1843
1844#ifndef IN_NEM_DARWIN
1845 /*
1846 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1847 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1848 * can run the real-mode guest code under Virtual 8086 mode.
1849 */
1850 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1851 if (pVmcsInfo->RealMode.fRealOnV86Active)
1852 {
1853 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1854 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1855 Assert(!pVmxTransient->fIsNestedGuest);
1856 pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
1857 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
1858 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1859 }
1860#else
1861 RT_NOREF(pVmxTransient);
1862#endif
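/*
 * Worked example (illustrative values) for the real-on-v86 path above: guest eflags
 * 0x00000202 (IF set) is saved as-is and then exported as 0x00020202, i.e. with the
 * VM bit (bit 17) set and IOPL (bits 13:12) forced to 0 so privileged instructions
 * fault and can be emulated.
 */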
1863
1864 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
1865 AssertRC(rc);
1866
1867 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1868 Log4Func(("eflags=%#RX32\n", fEFlags.u32));
1869 }
1870}
1871
1872
1873#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1874/**
1875 * Copies the nested-guest VMCS to the shadow VMCS.
1876 *
1877 * @returns VBox status code.
1878 * @param pVCpu The cross context virtual CPU structure.
1879 * @param pVmcsInfo The VMCS info. object.
1880 *
1881 * @remarks No-long-jump zone!!!
1882 */
1883static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1884{
1885 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1886 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1887
1888 /*
1889 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1890 * current VMCS, as we may try saving guest lazy MSRs.
1891 *
1892 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1893 * calling the import VMCS code which is currently performing the guest MSR reads
1894 * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
1895 * and the rest of the VMX leave session machinery.
1896 */
1897 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1898
1899 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1900 if (RT_SUCCESS(rc))
1901 {
1902 /*
1903 * Copy all guest read/write VMCS fields.
1904 *
1905 * We don't check for VMWRITE failures here for performance reasons and
1906 * because they are not expected to fail, barring irrecoverable conditions
1907 * like hardware errors.
1908 */
1909 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1910 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1911 {
1912 uint64_t u64Val;
1913 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1914 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1915 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1916 }
1917
1918 /*
1919 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1920 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1921 */
1922 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1923 {
1924 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1925 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1926 {
1927 uint64_t u64Val;
1928 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1929 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1930 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1931 }
1932 }
1933
1934 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1935 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1936 }
1937
1938 ASMSetFlags(fEFlags);
1939 return rc;
1940}
1941
1942
1943/**
1944 * Copies the shadow VMCS to the nested-guest VMCS.
1945 *
1946 * @returns VBox status code.
1947 * @param pVCpu The cross context virtual CPU structure.
1948 * @param pVmcsInfo The VMCS info. object.
1949 *
1950 * @remarks Called with interrupts disabled.
1951 */
1952static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1953{
1954 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1955 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1956 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1957
1958 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1959 if (RT_SUCCESS(rc))
1960 {
1961 /*
1962 * Copy guest read/write fields from the shadow VMCS.
1963 * Guest read-only fields cannot be modified, so no need to copy them.
1964 *
1965 * We don't check for VMREAD failures here for performance reasons and
1966 * because they are not expected to fail, barring irrecoverable conditions
1967 * like hardware errors.
1968 */
1969 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1970 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1971 {
1972 uint64_t u64Val;
1973 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1974 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1975 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1976 }
1977
1978 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1979 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1980 }
1981 return rc;
1982}
1983
1984
1985/**
1986 * Enables VMCS shadowing for the given VMCS info. object.
1987 *
1988 * @param pVCpu The cross context virtual CPU structure.
 * @param pVmcsInfo The VMCS info. object.
1989 *
1990 * @remarks No-long-jump zone!!!
1991 */
1992static void vmxHCEnableVmcsShadowing(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1993{
1994 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1995 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1996 {
1997 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1998 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1999 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2000 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
2001 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2002 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
2003 Log4Func(("Enabled\n"));
2004 }
2005}
2006
2007
2008/**
2009 * Disables VMCS shadowing for the given VMCS info. object.
2010 *
2011 * @param pVCpu The cross context virtual CPU structure.
 * @param pVmcsInfo The VMCS info. object.
2012 *
2013 * @remarks No-long-jump zone!!!
2014 */
2015static void vmxHCDisableVmcsShadowing(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2016{
2017 /*
2018 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
2019 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
2020 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
2021 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
2022 *
2023 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2024 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2025 */
2026 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2027 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2028 {
2029 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2030 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2031 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2032 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2033 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2034 Log4Func(("Disabled\n"));
2035 }
2036}
2037#endif
2038
2039
2040/**
2041 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2042 *
2043 * The guest FPU state is always pre-loaded hence we don't need to bother about
2044 * sharing FPU related CR0 bits between the guest and host.
2045 *
2046 * @returns VBox status code.
2047 * @param pVCpu The cross context virtual CPU structure.
2048 * @param pVmxTransient The VMX-transient structure.
2049 *
2050 * @remarks No-long-jump zone!!!
2051 */
2052static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2053{
2054 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2055 {
2056 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2057 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2058
2059 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2060 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2061 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2062 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2063 else
2064 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
2065
2066 if (!pVmxTransient->fIsNestedGuest)
2067 {
2068 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2069 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2070 uint64_t const u64ShadowCr0 = u64GuestCr0;
2071 Assert(!RT_HI_U32(u64GuestCr0));
2072
2073 /*
2074 * Setup VT-x's view of the guest CR0.
2075 */
2076 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2077 if (VM_IS_VMX_NESTED_PAGING(pVM))
2078 {
2079 if (CPUMIsGuestPagingEnabled(pVCpu))
2080 {
2081 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2082 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2083 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2084 }
2085 else
2086 {
2087 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2088 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2089 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2090 }
2091
2092 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2093 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2094 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2095 }
2096 else
2097 {
2098 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2099 u64GuestCr0 |= X86_CR0_WP;
2100 }
2101
2102 /*
2103 * Guest FPU bits.
2104 *
2105 * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state
2106 * using CR0.TS.
2107 *
2108 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2109 * set on the first CPUs to support VT-x; it makes no similar mention of UX in the VM-entry checks.
2110 */
2111 u64GuestCr0 |= X86_CR0_NE;
2112
2113 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2114 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2115
2116 /*
2117 * Update exception intercepts.
2118 */
2119 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2120#ifndef IN_NEM_DARWIN
2121 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2122 {
2123 Assert(PDMVmmDevHeapIsEnabled(pVM));
2124 Assert(pVM->hm.s.vmx.pRealModeTSS);
2125 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2126 }
2127 else
2128#endif
2129 {
2130 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2131 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2132 if (fInterceptMF)
2133 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2134 }
2135
2136 /* Additional intercepts for debugging, define these yourself explicitly. */
2137#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2138 uXcptBitmap |= 0
2139 | RT_BIT(X86_XCPT_BP)
2140 | RT_BIT(X86_XCPT_DE)
2141 | RT_BIT(X86_XCPT_NM)
2142 | RT_BIT(X86_XCPT_TS)
2143 | RT_BIT(X86_XCPT_UD)
2144 | RT_BIT(X86_XCPT_NP)
2145 | RT_BIT(X86_XCPT_SS)
2146 | RT_BIT(X86_XCPT_GP)
2147 | RT_BIT(X86_XCPT_PF)
2148 | RT_BIT(X86_XCPT_MF)
2149 ;
2150#elif defined(HMVMX_ALWAYS_TRAP_PF)
2151 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2152#endif
2153 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2154 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2155 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2156
2157 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2158 u64GuestCr0 |= fSetCr0;
2159 u64GuestCr0 &= fZapCr0;
2160 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
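/*
 * Example with typical hardware values (illustrative; actual MSR contents vary by CPU):
 * IA32_VMX_CR0_FIXED0 is commonly 0x80000021 (PG | NE | PE) and IA32_VMX_CR0_FIXED1 is
 * commonly 0xffffffff, so the two lines above OR in PE/NE/PG (with PE/PG already relaxed
 * earlier for unrestricted guests) and clear nothing, while CD/NW are always stripped to
 * keep caching enabled.
 */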
2161
2162 /* Commit the CR0 and related fields to the guest VMCS. */
2163 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2164 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2165 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2166 {
2167 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2168 AssertRC(rc);
2169 }
2170 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2171 {
2172 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2173 AssertRC(rc);
2174 }
2175
2176 /* Update our caches. */
2177 pVmcsInfo->u32ProcCtls = uProcCtls;
2178 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2179
2180 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2181 }
2182 else
2183 {
2184 /*
2185 * With nested-guests, we may have extended the guest/host mask here since we
2186 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2187 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2188 * originally supplied. We must copy those bits from the nested-guest CR0 into
2189 * the nested-guest CR0 read-shadow.
2190 */
2191 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2192 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2193 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2194 Assert(!RT_HI_U32(u64GuestCr0));
2195 Assert(u64GuestCr0 & X86_CR0_NE);
2196
2197 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2198 u64GuestCr0 |= fSetCr0;
2199 u64GuestCr0 &= fZapCr0;
2200 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2201
2202 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2203 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2204 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2205
2206 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2207 }
2208
2209 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2210 }
2211
2212 return VINF_SUCCESS;
2213}
2214
2215
2216/**
2217 * Exports the guest control registers (CR3, CR4) into the guest-state area
2218 * in the VMCS.
2219 *
2220 * @returns VBox strict status code.
2221 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2222 * without unrestricted guest access and the VMMDev is not presently
2223 * mapped (e.g. EFI32).
2224 *
2225 * @param pVCpu The cross context virtual CPU structure.
2226 * @param pVmxTransient The VMX-transient structure.
2227 *
2228 * @remarks No-long-jump zone!!!
2229 */
2230static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2231{
2232 int rc = VINF_SUCCESS;
2233 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2234
2235 /*
2236 * Guest CR2.
2237 * It's always loaded in the assembler code. Nothing to do here.
2238 */
2239
2240 /*
2241 * Guest CR3.
2242 */
2243 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2244 {
2245 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2246
2247 if (VM_IS_VMX_NESTED_PAGING(pVM))
2248 {
2249#ifndef IN_NEM_DARWIN
2250 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2251 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2252
2253 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2254 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2255 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2256 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2257
2258 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2259 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2260 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
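/*
 * Worked example (illustrative address): with a PML4 at host-physical 0x12345000 the
 * resulting EPTP is 0x1234501e: bits 2:0 = 6 (write-back memory type), bits 5:3 = 3
 * (EPT page-walk length minus one, i.e. a 4-level walk), and the upper bits hold the
 * page-aligned PML4 address.
 */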
2261
2262 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2263 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2264 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2265 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2266 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2267 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2268 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2269
2270 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2271 AssertRC(rc);
2272#endif
2273
2274 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2275 uint64_t u64GuestCr3 = pCtx->cr3;
2276 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2277 || CPUMIsGuestPagingEnabledEx(pCtx))
2278 {
2279 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2280 if (CPUMIsGuestInPAEModeEx(pCtx))
2281 {
2282 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2283 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2284 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2285 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2286 }
2287
2288 /*
2289 * The guest's view of its CR3 is unblemished with nested paging when the
2290 * guest is using paging or we have unrestricted guest execution to handle
2291 * the guest when it's not using paging.
2292 */
2293 }
2294#ifndef IN_NEM_DARWIN
2295 else
2296 {
2297 /*
2298 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2299 * thinks it accesses physical memory directly, we use our identity-mapped
2300 * page table to map guest-linear to guest-physical addresses. EPT takes care
2301 * of translating it to host-physical addresses.
2302 */
2303 RTGCPHYS GCPhys;
2304 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2305
2306 /* We obtain it here every time as the guest could have relocated this PCI region. */
2307 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2308 if (RT_SUCCESS(rc))
2309 { /* likely */ }
2310 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2311 {
2312 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2313 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2314 }
2315 else
2316 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2317
2318 u64GuestCr3 = GCPhys;
2319 }
2320#endif
2321
2322 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2323 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2324 AssertRC(rc);
2325 }
2326 else
2327 {
2328 Assert(!pVmxTransient->fIsNestedGuest);
2329 /* Non-nested paging case, just use the hypervisor's CR3. */
2330 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2331
2332 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2333 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2334 AssertRC(rc);
2335 }
2336
2337 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2338 }
2339
2340 /*
2341 * Guest CR4.
2342 * ASSUMES this is done every time we get in from ring-3! (XCR0)
2343 */
2344 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2345 {
2346 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2347 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2348
2349 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2350 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2351
2352 /*
2353 * With nested-guests, we may have extended the guest/host mask here (since we
2354 * merged in the outer guest's mask, see vmxHCMergeVmcsNested). This means, the
2355 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2356 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2357 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2358 */
2359 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2360 uint64_t u64GuestCr4 = pCtx->cr4;
2361 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2362 ? pCtx->cr4
2363 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2364 Assert(!RT_HI_U32(u64GuestCr4));
2365
2366#ifndef IN_NEM_DARWIN
2367 /*
2368 * Setup VT-x's view of the guest CR4.
2369 *
2370 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2371 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2372 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2373 *
2374 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2375 */
2376 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2377 {
2378 Assert(pVM->hm.s.vmx.pRealModeTSS);
2379 Assert(PDMVmmDevHeapIsEnabled(pVM));
2380 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2381 }
2382#endif
2383
2384 if (VM_IS_VMX_NESTED_PAGING(pVM))
2385 {
2386 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2387 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2388 {
2389 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2390 u64GuestCr4 |= X86_CR4_PSE;
2391 /* Our identity mapping is a 32-bit page directory. */
2392 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2393 }
2394 /* else use guest CR4.*/
2395 }
2396 else
2397 {
2398 Assert(!pVmxTransient->fIsNestedGuest);
2399
2400 /*
2401 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2402 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2403 */
2404 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2405 {
2406 case PGMMODE_REAL: /* Real-mode. */
2407 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2408 case PGMMODE_32_BIT: /* 32-bit paging. */
2409 {
2410 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2411 break;
2412 }
2413
2414 case PGMMODE_PAE: /* PAE paging. */
2415 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2416 {
2417 u64GuestCr4 |= X86_CR4_PAE;
2418 break;
2419 }
2420
2421 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2422 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2423 {
2424#ifdef VBOX_WITH_64_BITS_GUESTS
2425 /* For our assumption in vmxHCShouldSwapEferMsr. */
2426 Assert(u64GuestCr4 & X86_CR4_PAE);
2427 break;
2428#endif
2429 }
2430 default:
2431 AssertFailed();
2432 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2433 }
2434 }
2435
2436 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2437 u64GuestCr4 |= fSetCr4;
2438 u64GuestCr4 &= fZapCr4;
2439
2440 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2441 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2442 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2443
2444#ifndef IN_NEM_DARWIN
2445 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2446 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2447 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2448 {
2449 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2450 hmR0VmxUpdateStartVmFunction(pVCpu);
2451 }
2452#endif
2453
2454 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2455
2456 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2457 }
2458 return rc;
2459}
2460
2461
2462#ifdef VBOX_STRICT
2463/**
2464 * Strict function to validate segment registers.
2465 *
2466 * @param pVCpu The cross context virtual CPU structure.
2467 * @param pVmcsInfo The VMCS info. object.
2468 *
2469 * @remarks Will import guest CR0 on strict builds during validation of
2470 * segments.
2471 */
2472static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2473{
2474 /*
2475 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2476 *
2477 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2478 * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2479 * unusable bit and doesn't change the guest-context value.
2480 */
2481 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2482 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2483 vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2484 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2485 && ( !CPUMIsGuestInRealModeEx(pCtx)
2486 && !CPUMIsGuestInV86ModeEx(pCtx)))
2487 {
2488 /* Protected mode checks */
2489 /* CS */
2490 Assert(pCtx->cs.Attr.n.u1Present);
2491 Assert(!(pCtx->cs.Attr.u & 0xf00));
2492 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2493 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2494 || !(pCtx->cs.Attr.n.u1Granularity));
2495 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2496 || (pCtx->cs.Attr.n.u1Granularity));
2497 /* CS cannot be loaded with NULL in protected mode. */
2498 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2499 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2500 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2501 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2502 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2503 else
2504 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2505 /* SS */
2506 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2507 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2508 if ( !(pCtx->cr0 & X86_CR0_PE)
2509 || pCtx->cs.Attr.n.u4Type == 3)
2510 {
2511 Assert(!pCtx->ss.Attr.n.u2Dpl);
2512 }
2513 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2514 {
2515 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2516 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2517 Assert(pCtx->ss.Attr.n.u1Present);
2518 Assert(!(pCtx->ss.Attr.u & 0xf00));
2519 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2520 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2521 || !(pCtx->ss.Attr.n.u1Granularity));
2522 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2523 || (pCtx->ss.Attr.n.u1Granularity));
2524 }
2525 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2526 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2527 {
2528 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2529 Assert(pCtx->ds.Attr.n.u1Present);
2530 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2531 Assert(!(pCtx->ds.Attr.u & 0xf00));
2532 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2533 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2534 || !(pCtx->ds.Attr.n.u1Granularity));
2535 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2536 || (pCtx->ds.Attr.n.u1Granularity));
2537 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2538 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2539 }
2540 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2541 {
2542 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2543 Assert(pCtx->es.Attr.n.u1Present);
2544 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2545 Assert(!(pCtx->es.Attr.u & 0xf00));
2546 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2547 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2548 || !(pCtx->es.Attr.n.u1Granularity));
2549 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2550 || (pCtx->es.Attr.n.u1Granularity));
2551 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2552 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2553 }
2554 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2555 {
2556 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2557 Assert(pCtx->fs.Attr.n.u1Present);
2558 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2559 Assert(!(pCtx->fs.Attr.u & 0xf00));
2560 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2561 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2562 || !(pCtx->fs.Attr.n.u1Granularity));
2563 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2564 || (pCtx->fs.Attr.n.u1Granularity));
2565 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2566 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2567 }
2568 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2569 {
2570 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2571 Assert(pCtx->gs.Attr.n.u1Present);
2572 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2573 Assert(!(pCtx->gs.Attr.u & 0xf00));
2574 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2575 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2576 || !(pCtx->gs.Attr.n.u1Granularity));
2577 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2578 || (pCtx->gs.Attr.n.u1Granularity));
2579 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2580 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2581 }
2582 /* 64-bit capable CPUs. */
2583 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2584 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2585 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2586 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2587 }
2588 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2589 || ( CPUMIsGuestInRealModeEx(pCtx)
2590 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2591 {
2592 /* Real and v86 mode checks. */
2593 /* vmxHCExportGuestSegReg() writes the modified value into the VMCS. We want what we're feeding to VT-x. */
2594 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2595#ifndef IN_NEM_DARWIN
2596 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2597 {
2598 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2599 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2600 }
2601 else
2602#endif
2603 {
2604 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2605 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2606 }
2607
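/*
 * Reminder of the real/v86-mode invariant checked below (illustrative value): a segment
 * base must equal the selector shifted left by 4, e.g. CS.Sel = 0x1234 implies
 * CS.base = 0x12340, with a 64 KiB limit (0xffff) and the 0xf3 access rights the
 * real-on-v86 hack feeds to VT-x.
 */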
2608 /* CS */
2609 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2610 Assert(pCtx->cs.u32Limit == 0xffff);
2611 Assert(u32CSAttr == 0xf3);
2612 /* SS */
2613 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2614 Assert(pCtx->ss.u32Limit == 0xffff);
2615 Assert(u32SSAttr == 0xf3);
2616 /* DS */
2617 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2618 Assert(pCtx->ds.u32Limit == 0xffff);
2619 Assert(u32DSAttr == 0xf3);
2620 /* ES */
2621 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2622 Assert(pCtx->es.u32Limit == 0xffff);
2623 Assert(u32ESAttr == 0xf3);
2624 /* FS */
2625 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2626 Assert(pCtx->fs.u32Limit == 0xffff);
2627 Assert(u32FSAttr == 0xf3);
2628 /* GS */
2629 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2630 Assert(pCtx->gs.u32Limit == 0xffff);
2631 Assert(u32GSAttr == 0xf3);
2632 /* 64-bit capable CPUs. */
2633 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2634 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2635 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2636 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2637 }
2638}
2639#endif /* VBOX_STRICT */
2640
2641
2642/**
2643 * Exports a guest segment register into the guest-state area in the VMCS.
2644 *
2645 * @returns VBox status code.
2646 * @param pVCpu The cross context virtual CPU structure.
2647 * @param pVmcsInfo The VMCS info. object.
2648 * @param iSegReg The segment register number (X86_SREG_XXX).
2649 * @param pSelReg Pointer to the segment selector.
2650 *
2651 * @remarks No-long-jump zone!!!
2652 */
2653static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2654{
2655 Assert(iSegReg < X86_SREG_COUNT);
2656
2657 uint32_t u32Access = pSelReg->Attr.u;
2658#ifndef IN_NEM_DARWIN
2659 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2660#endif
2661 {
2662 /*
2663 * The way to differentiate between whether this is really a null selector or was just
2664 * a selector loaded with 0 in real-mode is using the segment attributes. A selector
2665 * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
2666 * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure
2667 * that NULL selectors loaded in protected-mode have their attributes set to 0.
2668 */
2669 if (u32Access)
2670 { }
2671 else
2672 u32Access = X86DESCATTR_UNUSABLE;
2673 }
2674#ifndef IN_NEM_DARWIN
2675 else
2676 {
2677 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2678 u32Access = 0xf3;
2679 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2680 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2681 RT_NOREF_PV(pVCpu);
2682 }
2683#else
2684 RT_NOREF(pVmcsInfo);
2685#endif
2686
2687 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2688 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2689 ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2690
2691 /*
2692 * Commit it to the VMCS.
2693 */
2694 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
2695 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
2696 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
2697 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
2698 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2699 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2700 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2701 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2702 return VINF_SUCCESS;
2703}
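/*
 * Illustrative call (mirrors how vmxHCExportGuestSegRegsXdtr() below uses this helper):
 *
 *     rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pVCpu->cpum.GstCtx.cs);
 *     AssertRC(rc);
 */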
2704
2705
2706/**
2707 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2708 * area in the VMCS.
2709 *
2710 * @returns VBox status code.
2711 * @param pVCpu The cross context virtual CPU structure.
2712 * @param pVmxTransient The VMX-transient structure.
2713 *
2714 * @remarks Will import guest CR0 on strict builds during validation of
2715 * segments.
2716 * @remarks No-long-jump zone!!!
2717 */
2718static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2719{
2720 int rc = VERR_INTERNAL_ERROR_5;
2721#ifndef IN_NEM_DARWIN
2722 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2723#endif
2724 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2725 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2726#ifndef IN_NEM_DARWIN
2727 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2728#endif
2729
2730 /*
2731 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2732 */
2733 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2734 {
2735 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2736 {
2737 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2738#ifndef IN_NEM_DARWIN
2739 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2740 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2741#endif
2742 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2743 AssertRC(rc);
2744 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2745 }
2746
2747 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2748 {
2749 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2750#ifndef IN_NEM_DARWIN
2751 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2752 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2753#endif
2754 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2755 AssertRC(rc);
2756 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2757 }
2758
2759 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2760 {
2761 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2762#ifndef IN_NEM_DARWIN
2763 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2764 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2765#endif
2766 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2767 AssertRC(rc);
2768 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2769 }
2770
2771 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2772 {
2773 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2774#ifndef IN_NEM_DARWIN
2775 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2776 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2777#endif
2778 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2779 AssertRC(rc);
2780 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2781 }
2782
2783 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2784 {
2785 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2786#ifndef IN_NEM_DARWIN
2787 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2788 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2789#endif
2790 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2791 AssertRC(rc);
2792 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2793 }
2794
2795 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2796 {
2797 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2798#ifndef IN_NEM_DARWIN
2799 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2800 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2801#endif
2802 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2803 AssertRC(rc);
2804 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2805 }
2806
2807#ifdef VBOX_STRICT
2808 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2809#endif
2810 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2811 pCtx->cs.Attr.u));
2812 }
2813
2814 /*
2815 * Guest TR.
2816 */
2817 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2818 {
2819 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2820
2821 /*
2822 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2823 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2824 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2825 */
2826 uint16_t u16Sel;
2827 uint32_t u32Limit;
2828 uint64_t u64Base;
2829 uint32_t u32AccessRights;
2830#ifndef IN_NEM_DARWIN
2831 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2832#endif
2833 {
2834 u16Sel = pCtx->tr.Sel;
2835 u32Limit = pCtx->tr.u32Limit;
2836 u64Base = pCtx->tr.u64Base;
2837 u32AccessRights = pCtx->tr.Attr.u;
2838 }
2839#ifndef IN_NEM_DARWIN
2840 else
2841 {
2842 Assert(!pVmxTransient->fIsNestedGuest);
2843 Assert(pVM->hm.s.vmx.pRealModeTSS);
2844 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2845
2846 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2847 RTGCPHYS GCPhys;
2848 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2849 AssertRCReturn(rc, rc);
2850
2851 X86DESCATTR DescAttr;
2852 DescAttr.u = 0;
2853 DescAttr.n.u1Present = 1;
2854 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2855
2856 u16Sel = 0;
2857 u32Limit = HM_VTX_TSS_SIZE;
2858 u64Base = GCPhys;
2859 u32AccessRights = DescAttr.u;
2860 }
2861#endif
2862
2863 /* Validate. */
2864 Assert(!(u16Sel & RT_BIT(2)));
2865 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2866 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2867 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2868 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ. */
2869 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1. */
2870 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2871 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2872 Assert( (u32Limit & 0xfff) == 0xfff
2873 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2874 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2875 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2876
2877 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2878 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2879 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2880 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2881
2882 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2883 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2884 }
2885
2886 /*
2887 * Guest GDTR.
2888 */
2889 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2890 {
2891 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2892
2893 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2894 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2895
2896 /* Validate. */
2897 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2898
2899 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2900 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2901 }
2902
2903 /*
2904 * Guest LDTR.
2905 */
2906 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2907 {
2908 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2909
2910 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2911 uint32_t u32Access;
2912 if ( !pVmxTransient->fIsNestedGuest
2913 && !pCtx->ldtr.Attr.u)
2914 u32Access = X86DESCATTR_UNUSABLE;
2915 else
2916 u32Access = pCtx->ldtr.Attr.u;
2917
2918 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2919 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2920 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2921 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2922
2923 /* Validate. */
2924 if (!(u32Access & X86DESCATTR_UNUSABLE))
2925 {
2926 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2927 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2928 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2929 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2930 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2931 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2932 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2933 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2934 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2935 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2936 }
2937
2938 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2939 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2940 }
2941
2942 /*
2943 * Guest IDTR.
2944 */
2945 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2946 {
2947 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2948
2949 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2950 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2951
2952 /* Validate. */
2953 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2954
2955 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2956 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2957 }
2958
2959 return VINF_SUCCESS;
2960}
2961
2962
2963/**
2964 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2965 * VM-exit interruption info type.
2966 *
2967 * @returns The IEM exception flags.
2968 * @param uVector The event vector.
2969 * @param uVmxEventType The VMX event type.
2970 *
2971 * @remarks This function currently only constructs flags required for
2972 *          IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2973 * and CR2 aspects of an exception are not included).
2974 */
2975static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2976{
2977 uint32_t fIemXcptFlags;
2978 switch (uVmxEventType)
2979 {
2980 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2981 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2982 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2983 break;
2984
2985 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2986 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2987 break;
2988
2989 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2990 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2991 break;
2992
2993 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2994 {
2995 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2996 if (uVector == X86_XCPT_BP)
2997 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2998 else if (uVector == X86_XCPT_OF)
2999 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
3000 else
3001 {
3002 fIemXcptFlags = 0;
3003 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
3004 }
3005 break;
3006 }
3007
3008 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
3009 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3010 break;
3011
3012 default:
3013 fIemXcptFlags = 0;
3014 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
3015 break;
3016 }
3017 return fIemXcptFlags;
3018}
3019
3020
3021/**
3022 * Sets an event as a pending event to be injected into the guest.
3023 *
3024 * @param pVCpu The cross context virtual CPU structure.
3025 * @param u32IntInfo The VM-entry interruption-information field.
3026 * @param cbInstr The VM-entry instruction length in bytes (for
3027 * software interrupts, exceptions and privileged
3028 * software exceptions).
3029 * @param u32ErrCode The VM-entry exception error code.
3030 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3031 * page-fault.
3032 */
3033DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3034 RTGCUINTPTR GCPtrFaultAddress)
3035{
3036 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3037 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3038 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3039 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3040 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3041 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3042}
3043
3044
3045/**
3046 * Sets an external interrupt as pending-for-injection into the VM.
3047 *
3048 * @param pVCpu The cross context virtual CPU structure.
3049 * @param u8Interrupt The external interrupt vector.
3050 */
3051DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3052{
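    /* Note: the vector is composed with VMX_BF_EXIT_INT_INFO_VECTOR while the remaining bits use
       the VM-entry macros; the VM-entry and VM-exit interruption-information formats keep the
       vector in the same bit positions (bits 7:0), so the resulting value is the same. */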
3053 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3054 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3055 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3056 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3057 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3058}
3059
3060
3061/**
3062 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3063 *
3064 * @param pVCpu The cross context virtual CPU structure.
3065 */
3066DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3067{
3068 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3069 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3070 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3071 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3072 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3073}
3074
3075
3076/**
3077 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3078 *
3079 * @param pVCpu The cross context virtual CPU structure.
3080 */
3081DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3082{
3083 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3084 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3085 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3086 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3087 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3088}
3089
3090
3091/**
3092 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3093 *
3094 * @param pVCpu The cross context virtual CPU structure.
3095 */
3096DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3097{
3098 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3099 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3100 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3101 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3102 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3103}
3104
3105
3106/**
3107 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3108 *
3109 * @param pVCpu The cross context virtual CPU structure.
3110 */
3111DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3112{
3113 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3114 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3115 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3116 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3117 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3118}
3119
3120
3121#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3122/**
3123 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3124 *
3125 * @param pVCpu The cross context virtual CPU structure.
3126 * @param u32ErrCode The error code for the general-protection exception.
3127 */
3128DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3129{
3130 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3131 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3132 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3133 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3134 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3135}
3136
3137
3138/**
3139 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3140 *
3141 * @param pVCpu The cross context virtual CPU structure.
3142 * @param u32ErrCode The error code for the stack exception.
3143 */
3144DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3145{
3146 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3147 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3148 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3149 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3150 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3151}
3152#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3153
3154
3155/**
3156 * Fixes up attributes for the specified segment register.
3157 *
3158 * @param pVCpu The cross context virtual CPU structure.
3159 * @param pSelReg The segment register that needs fixing.
3160 * @param pszRegName The register name (for logging and assertions).
3161 */
3162static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3163{
3164 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3165
3166 /*
3167 * If VT-x marks the segment as unusable, most other bits remain undefined:
3168 * - For CS the L, D and G bits have meaning.
3169 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3170 * - For the remaining data segments no bits are defined.
3171 *
3172     * The present bit and the unusable bit have been observed to be set at the
3173 * same time (the selector was supposed to be invalid as we started executing
3174 * a V8086 interrupt in ring-0).
3175 *
3176     * What is important for the rest of the VBox code is that the P bit is
3177 * cleared. Some of the other VBox code recognizes the unusable bit, but
3178     * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3179 * safe side here, we'll strip off P and other bits we don't care about. If
3180 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3181 *
3182 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3183 */
3184#ifdef VBOX_STRICT
3185 uint32_t const uAttr = pSelReg->Attr.u;
3186#endif
3187
3188 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3189 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3190 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3191
3192#ifdef VBOX_STRICT
3193# ifndef IN_NEM_DARWIN
3194 VMMRZCallRing3Disable(pVCpu);
3195# endif
3196 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3197# ifdef DEBUG_bird
3198 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3199 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3200 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3201# endif
3202# ifndef IN_NEM_DARWIN
3203 VMMRZCallRing3Enable(pVCpu);
3204# endif
3205 NOREF(uAttr);
3206#endif
3207 RT_NOREF2(pVCpu, pszRegName);
3208}
3209
3210
3211/**
3212 * Imports a guest segment register from the current VMCS into the guest-CPU
3213 * context.
3214 *
3215 * @param pVCpu The cross context virtual CPU structure.
3216 * @param iSegReg The segment register number (X86_SREG_XXX).
3217 *
3218 * @remarks Called with interrupts and/or preemption disabled.
3219 */
3220static void vmxHCImportGuestSegReg(PVMCPUCC pVCpu, uint32_t iSegReg)
3221{
3222 Assert(iSegReg < X86_SREG_COUNT);
3223 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
3224 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
3225 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
3226 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
3227
3228 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
3229
3230 uint16_t u16Sel;
3231 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), &u16Sel); AssertRC(rc);
3232 pSelReg->Sel = u16Sel;
3233 pSelReg->ValidSel = u16Sel;
3234
3235 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3236 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), &pSelReg->u64Base); AssertRC(rc);
3237
3238 uint32_t u32Attr;
3239 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), &u32Attr); AssertRC(rc);
3240 pSelReg->Attr.u = u32Attr;
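    /* The register name passed below is picked out of the packed string "ES\0CS\0SS\0DS\0FS\0GS":
       each name is two characters plus its terminator (3 bytes), so iSegReg * 3 indexes the name
       matching the X86_SREG_XXX value. */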
3241 if (u32Attr & X86DESCATTR_UNUSABLE)
3242 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + iSegReg * 3);
3243
3244 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3245}
3246
3247
3248/**
3249 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3250 *
3251 * @param pVCpu The cross context virtual CPU structure.
3252 *
3253 * @remarks Called with interrupts and/or preemption disabled.
3254 */
3255static void vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3256{
3257 uint16_t u16Sel;
3258 uint64_t u64Base;
3259 uint32_t u32Limit, u32Attr;
3260 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3261 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3262 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3263 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3264
3265 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3266 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3267 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3268 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3269 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3270 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3271 if (u32Attr & X86DESCATTR_UNUSABLE)
3272 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3273}
3274
3275
3276/**
3277 * Imports the guest TR from the current VMCS into the guest-CPU context.
3278 *
3279 * @param pVCpu The cross context virtual CPU structure.
3280 *
3281 * @remarks Called with interrupts and/or preemption disabled.
3282 */
3283static void vmxHCImportGuestTr(PVMCPUCC pVCpu)
3284{
3285 uint16_t u16Sel;
3286 uint64_t u64Base;
3287 uint32_t u32Limit, u32Attr;
3288 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3289 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3290 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3291 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3292
3293 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3294 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3295 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3296 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3297 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3298 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3299 /* TR is the only selector that can never be unusable. */
3300 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3301}
3302
3303
3304/**
3305 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3306 *
3307 * @param pVCpu The cross context virtual CPU structure.
3308 *
3309 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3310 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3311 * instead!!!
3312 */
3313static void vmxHCImportGuestRip(PVMCPUCC pVCpu)
3314{
3315 uint64_t u64Val;
3316 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3317 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
3318 {
3319 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3320 AssertRC(rc);
3321
3322 pCtx->rip = u64Val;
3323 EMHistoryUpdatePC(pVCpu, pCtx->rip, false);
3324 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
3325 }
3326}
3327
3328
3329/**
3330 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3331 *
3332 * @param pVCpu The cross context virtual CPU structure.
3333 * @param pVmcsInfo The VMCS info. object.
3334 *
3335 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3336 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3337 * instead!!!
3338 */
3339static void vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3340{
3341 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3342 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
3343 {
3344 uint64_t u64Val;
3345 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
3346 AssertRC(rc);
3347
3348 pCtx->rflags.u64 = u64Val;
3349#ifndef IN_NEM_DARWIN
3350 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3351 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
3352 {
3353 pCtx->eflags.Bits.u1VM = 0;
3354 pCtx->eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3355 }
3356#else
3357 RT_NOREF(pVmcsInfo);
3358#endif
3359 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3360 }
3361}
3362
3363
3364/**
3365 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3366 * context.
3367 *
3368 * @param pVCpu The cross context virtual CPU structure.
3369 * @param pVmcsInfo The VMCS info. object.
3370 *
3371 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3372 * do not log!
3373 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3374 * instead!!!
3375 */
3376static void vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3377{
3378 uint32_t u32Val;
3379 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3380 if (!u32Val)
3381 {
3382 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3383 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3384 CPUMSetGuestNmiBlocking(pVCpu, false);
3385 }
3386 else
3387 {
3388 /*
3389 * We must import RIP here to set our EM interrupt-inhibited state.
3390 * We also import RFLAGS as our code that evaluates pending interrupts
3391 * before VM-entry requires it.
3392 */
3393 vmxHCImportGuestRip(pVCpu);
3394 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3395
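        /* Note: the interrupt inhibition recorded below is tied to the current RIP; EM treats it
           as stale once RIP has moved past the instruction that set up the MOV SS/STI shadow. */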
3396 if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3397 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
3398 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3399 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3400
3401 bool const fNmiBlocking = RT_BOOL(u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
3402 CPUMSetGuestNmiBlocking(pVCpu, fNmiBlocking);
3403 }
3404}
3405
3406
3407/**
3408 * Worker for VMXR0ImportStateOnDemand.
3409 *
3410 * @returns VBox status code.
3411 * @param pVCpu The cross context virtual CPU structure.
3412 * @param pVmcsInfo The VMCS info. object.
3413 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3414 */
3415static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3416{
3417 int rc = VINF_SUCCESS;
3418 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3419 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3420 uint32_t u32Val;
3421
3422 /*
3423     * Note! This is a hack to work around a mysterious BSOD observed with release builds
3424 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3425 * neither are other host platforms.
3426 *
3427 * Committing this temporarily as it prevents BSOD.
3428 *
3429 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3430 */
3431# ifdef RT_OS_WINDOWS
3432 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3433 return VERR_HM_IPE_1;
3434# endif
3435
3436 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3437
3438#ifndef IN_NEM_DARWIN
3439 /*
3440 * We disable interrupts to make the updating of the state and in particular
3441     * the fExtrn modification atomic wrt preemption hooks.
3442 */
3443 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3444#endif
3445
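    /* Only state still marked as external (i.e. not yet present in the CPUM context) needs
       importing; everything else is already up to date. */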
3446 fWhat &= pCtx->fExtrn;
3447 if (fWhat)
3448 {
3449 do
3450 {
3451 if (fWhat & CPUMCTX_EXTRN_RIP)
3452 vmxHCImportGuestRip(pVCpu);
3453
3454 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3455 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3456
3457 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3458 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3459
3460 if (fWhat & CPUMCTX_EXTRN_RSP)
3461 {
3462 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3463 AssertRC(rc);
3464 }
3465
3466 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3467 {
3468 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3469#ifndef IN_NEM_DARWIN
3470 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3471#else
3472 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3473#endif
3474 if (fWhat & CPUMCTX_EXTRN_CS)
3475 {
3476 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
3477 vmxHCImportGuestRip(pVCpu);
3478 if (fRealOnV86Active)
3479 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3480 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3481 }
3482 if (fWhat & CPUMCTX_EXTRN_SS)
3483 {
3484 vmxHCImportGuestSegReg(pVCpu, X86_SREG_SS);
3485 if (fRealOnV86Active)
3486 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3487 }
3488 if (fWhat & CPUMCTX_EXTRN_DS)
3489 {
3490 vmxHCImportGuestSegReg(pVCpu, X86_SREG_DS);
3491 if (fRealOnV86Active)
3492 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3493 }
3494 if (fWhat & CPUMCTX_EXTRN_ES)
3495 {
3496 vmxHCImportGuestSegReg(pVCpu, X86_SREG_ES);
3497 if (fRealOnV86Active)
3498 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3499 }
3500 if (fWhat & CPUMCTX_EXTRN_FS)
3501 {
3502 vmxHCImportGuestSegReg(pVCpu, X86_SREG_FS);
3503 if (fRealOnV86Active)
3504 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3505 }
3506 if (fWhat & CPUMCTX_EXTRN_GS)
3507 {
3508 vmxHCImportGuestSegReg(pVCpu, X86_SREG_GS);
3509 if (fRealOnV86Active)
3510 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3511 }
3512 }
3513
3514 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3515 {
3516 if (fWhat & CPUMCTX_EXTRN_LDTR)
3517 vmxHCImportGuestLdtr(pVCpu);
3518
3519 if (fWhat & CPUMCTX_EXTRN_GDTR)
3520 {
3521 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3522 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3523 pCtx->gdtr.cbGdt = u32Val;
3524 }
3525
3526 /* Guest IDTR. */
3527 if (fWhat & CPUMCTX_EXTRN_IDTR)
3528 {
3529 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3530 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3531 pCtx->idtr.cbIdt = u32Val;
3532 }
3533
3534 /* Guest TR. */
3535 if (fWhat & CPUMCTX_EXTRN_TR)
3536 {
3537#ifndef IN_NEM_DARWIN
3538                /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3539                   so we don't need to import that one. */
3540 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3541#endif
3542 vmxHCImportGuestTr(pVCpu);
3543 }
3544 }
3545
3546 if (fWhat & CPUMCTX_EXTRN_DR7)
3547 {
3548#ifndef IN_NEM_DARWIN
3549 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3550#endif
3551 {
3552 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3553 AssertRC(rc);
3554 }
3555 }
3556
3557 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3558 {
3559 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3560 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3561 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3562 pCtx->SysEnter.cs = u32Val;
3563 }
3564
3565#ifndef IN_NEM_DARWIN
3566 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3567 {
3568 if ( pVM->hmr0.s.fAllow64BitGuests
3569 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3570 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3571 }
3572
3573 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3574 {
3575 if ( pVM->hmr0.s.fAllow64BitGuests
3576 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3577 {
3578 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3579 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3580 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3581 }
3582 }
3583
3584 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3585 {
3586 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3587 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3588 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3589 Assert(pMsrs);
3590 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3591 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
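            /* Walk the VM-exit MSR-store area; the CPU saved the current guest values of these
               MSRs there on the last VM-exit. */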
3592 for (uint32_t i = 0; i < cMsrs; i++)
3593 {
3594 uint32_t const idMsr = pMsrs[i].u32Msr;
3595 switch (idMsr)
3596 {
3597 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3598 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3599 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3600 default:
3601 {
3602 uint32_t idxLbrMsr;
3603 if (VM_IS_VMX_LBR(pVM))
3604 {
3605 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3606 {
3607 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3608 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3609 break;
3610 }
3611 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3612 {
3613                                Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3614 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3615 break;
3616 }
3617 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3618 {
3619 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3620 break;
3621 }
3622 /* Fallthru (no break) */
3623 }
3624 pCtx->fExtrn = 0;
3625                        VCPU_2_VMXSTATE(pVCpu).u32HMError = idMsr;
3626 ASMSetFlags(fEFlags);
3627 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3628 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3629 }
3630 }
3631 }
3632 }
3633#endif
3634
3635 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3636 {
3637 if (fWhat & CPUMCTX_EXTRN_CR0)
3638 {
3639 uint64_t u64Cr0;
3640 uint64_t u64Shadow;
3641 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3642 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3643#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3644 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3645 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3646#else
3647 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3648 {
3649 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3650 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3651 }
3652 else
3653 {
3654 /*
3655 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3656 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3657 * re-construct CR0. See @bugref{9180#c95} for details.
3658 */
3659 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3660 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3661 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3662 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3663 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3664 }
3665#endif
3666#ifndef IN_NEM_DARWIN
3667 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3668#endif
3669 CPUMSetGuestCR0(pVCpu, u64Cr0);
3670#ifndef IN_NEM_DARWIN
3671 VMMRZCallRing3Enable(pVCpu);
3672#endif
3673 }
3674
3675 if (fWhat & CPUMCTX_EXTRN_CR4)
3676 {
3677 uint64_t u64Cr4;
3678 uint64_t u64Shadow;
3679 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3680 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3681#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3682 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3683 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3684#else
3685 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3686 {
3687 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3688 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3689 }
3690 else
3691 {
3692 /*
3693 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3694 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3695 * re-construct CR4. See @bugref{9180#c95} for details.
3696 */
3697 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3698 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3699 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3700 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3701 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3702 }
3703#endif
3704 pCtx->cr4 = u64Cr4;
3705 }
3706
3707 if (fWhat & CPUMCTX_EXTRN_CR3)
3708 {
3709 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3710 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3711 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3712 && CPUMIsGuestPagingEnabledEx(pCtx)))
3713 {
3714 uint64_t u64Cr3;
3715 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3716 if (pCtx->cr3 != u64Cr3)
3717 {
3718 pCtx->cr3 = u64Cr3;
3719 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3720 }
3721
3722 /*
3723 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3724 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3725 */
3726 if (CPUMIsGuestInPAEModeEx(pCtx))
3727 {
3728 X86PDPE aPaePdpes[4];
3729 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3730 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3731 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3732 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3733 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3734 {
3735 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3736 /* PGM now updates PAE PDPTEs while updating CR3. */
3737 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3738 }
3739 }
3740 }
3741 }
3742 }
3743
3744#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3745 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3746 {
3747 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3748 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3749 {
3750 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3751 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3752 if (RT_SUCCESS(rc))
3753 { /* likely */ }
3754 else
3755 break;
3756 }
3757 }
3758#endif
3759 } while (0);
3760
3761 if (RT_SUCCESS(rc))
3762 {
3763 /* Update fExtrn. */
3764 pCtx->fExtrn &= ~fWhat;
3765
3766 /* If everything has been imported, clear the HM keeper bit. */
3767 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3768 {
3769#ifndef IN_NEM_DARWIN
3770 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3771#else
3772 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3773#endif
3774 Assert(!pCtx->fExtrn);
3775 }
3776 }
3777 }
3778#ifndef IN_NEM_DARWIN
3779 else
3780 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3781
3782 /*
3783 * Restore interrupts.
3784 */
3785 ASMSetFlags(fEFlags);
3786#endif
3787
3788 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3789
3790 if (RT_SUCCESS(rc))
3791 { /* likely */ }
3792 else
3793 return rc;
3794
3795 /*
3796 * Honor any pending CR3 updates.
3797 *
3798 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3799 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3800 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3801 *
3802 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3803 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3804 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3805 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3806 *
3807 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3808 *
3809 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3810 */
3811 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3812#ifndef IN_NEM_DARWIN
3813 && VMMRZCallRing3IsEnabled(pVCpu)
3814#endif
3815 )
3816 {
3817 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3818 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3819 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3820 }
3821
3822 return VINF_SUCCESS;
3823}
3824
3825
3826/**
3827 * Check per-VM and per-VCPU force flag actions that require us to go back to
3828 * ring-3 for one reason or another.
3829 *
3830 * @returns Strict VBox status code (i.e. informational status codes too)
3831 * @retval VINF_SUCCESS if we don't have any actions that require going back to
3832 * ring-3.
3833 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
3834 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
3835 * interrupts)
3836 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
3837 * all EMTs to be in ring-3.
3838 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
3839 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
3840 * to the EM loop.
3841 *
3842 * @param pVCpu The cross context virtual CPU structure.
3843 * @param   fIsNestedGuest  Flag whether this is for a pending nested-guest event.
3844 * @param fStepping Whether we are single-stepping the guest using the
3845 * hypervisor debugger.
3846 *
3847 * @remarks This might cause nested-guest VM-exits; the caller must check if the guest
3848 * is no longer in VMX non-root mode.
3849 */
3850static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
3851{
3852#ifndef IN_NEM_DARWIN
3853 Assert(VMMRZCallRing3IsEnabled(pVCpu));
3854#endif
3855
3856 /*
3857 * Update pending interrupts into the APIC's IRR.
3858 */
3859 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3860 APICUpdatePendingInterrupts(pVCpu);
3861
3862 /*
3863 * Anything pending? Should be more likely than not if we're doing a good job.
3864 */
3865 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3866 if ( !fStepping
3867 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
3868 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
3869 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
3870 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
3871 return VINF_SUCCESS;
3872
3873    /* Pending PGM CR3 sync. */
3874    if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
3875 {
3876 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3877 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
3878 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
3879 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
3880 if (rcStrict != VINF_SUCCESS)
3881 {
3882 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
3883 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
3884 return rcStrict;
3885 }
3886 }
3887
3888 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
3889 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
3890 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
3891 {
3892 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
3893 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
3894 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
3895 return rc;
3896 }
3897
3898 /* Pending VM request packets, such as hardware interrupts. */
3899 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
3900 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
3901 {
3902 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
3903 Log4Func(("Pending VM request forcing us back to ring-3\n"));
3904 return VINF_EM_PENDING_REQUEST;
3905 }
3906
3907 /* Pending PGM pool flushes. */
3908 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
3909 {
3910 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
3911 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
3912 return VINF_PGM_POOL_FLUSH_PENDING;
3913 }
3914
3915 /* Pending DMA requests. */
3916 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
3917 {
3918 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
3919 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
3920 return VINF_EM_RAW_TO_R3;
3921 }
3922
3923#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3924 /*
3925 * Pending nested-guest events.
3926 *
3927     * Please note that the priority of these events is specified and important.
3928 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
3929 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
3930 */
3931 if (fIsNestedGuest)
3932 {
3933 /* Pending nested-guest APIC-write. */
3934 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
3935 {
3936 Log4Func(("Pending nested-guest APIC-write\n"));
3937 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
3938 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3939 return rcStrict;
3940 }
3941
3942 /* Pending nested-guest monitor-trap flag (MTF). */
3943 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
3944 {
3945 Log4Func(("Pending nested-guest MTF\n"));
3946 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
3947 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3948 return rcStrict;
3949 }
3950
3951 /* Pending nested-guest VMX-preemption timer expired. */
3952 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
3953 {
3954 Log4Func(("Pending nested-guest preempt timer\n"));
3955 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
3956 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3957 return rcStrict;
3958 }
3959 }
3960#else
3961 NOREF(fIsNestedGuest);
3962#endif
3963
3964 return VINF_SUCCESS;
3965}
3966
3967
3968/**
3969 * Converts any TRPM trap into a pending HM event. This is typically used when
3970 * entering from ring-3 (not longjmp returns).
3971 *
3972 * @param pVCpu The cross context virtual CPU structure.
3973 */
3974static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
3975{
3976 Assert(TRPMHasTrap(pVCpu));
3977 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3978
3979 uint8_t uVector;
3980 TRPMEVENT enmTrpmEvent;
3981 uint32_t uErrCode;
3982 RTGCUINTPTR GCPtrFaultAddress;
3983 uint8_t cbInstr;
3984 bool fIcebp;
3985
3986 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
3987 AssertRC(rc);
3988
3989 uint32_t u32IntInfo;
3990 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
3991 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
3992
3993 rc = TRPMResetTrap(pVCpu);
3994 AssertRC(rc);
3995 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
3996 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
3997
3998 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
3999}
4000
4001
4002/**
4003 * Converts the pending HM event into a TRPM trap.
4004 *
4005 * @param pVCpu The cross context virtual CPU structure.
4006 */
4007static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4008{
4009 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4010
4011 /* If a trap was already pending, we did something wrong! */
4012 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4013
4014 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4015 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4016 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4017
4018 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4019
4020 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4021 AssertRC(rc);
4022
4023 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4024 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4025
4026 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4027 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4028 else
4029 {
4030 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4031 switch (uVectorType)
4032 {
4033 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4034 TRPMSetTrapDueToIcebp(pVCpu);
4035 RT_FALL_THRU();
4036 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4037 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4038 {
4039 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4040 || ( uVector == X86_XCPT_BP /* INT3 */
4041 || uVector == X86_XCPT_OF /* INTO */
4042 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4043 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4044 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4045 break;
4046 }
4047 }
4048 }
4049
4050 /* We're now done converting the pending event. */
4051 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4052}
4053
4054
4055/**
4056 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4057 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4058 *
4059 * @param pVCpu The cross context virtual CPU structure.
4060 * @param pVmcsInfo The VMCS info. object.
4061 */
4062static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4063{
4064 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4065 {
4066 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4067 {
4068 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4069 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4070 AssertRC(rc);
4071 }
4072    } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4073}
4074
4075
4076/**
4077 * Clears the interrupt-window exiting control in the VMCS.
4078 *
4079 * @param pVCpu The cross context virtual CPU structure.
4080 * @param pVmcsInfo The VMCS info. object.
4081 */
4082DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4083{
4084 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4085 {
4086 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4087 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4088 AssertRC(rc);
4089 }
4090}
4091
4092
4093/**
4094 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4095 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4096 *
4097 * @param pVCpu The cross context virtual CPU structure.
4098 * @param pVmcsInfo The VMCS info. object.
4099 */
4100static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4101{
4102 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4103 {
4104 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4105 {
4106 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4107 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4108 AssertRC(rc);
4109 Log4Func(("Setup NMI-window exiting\n"));
4110 }
4111 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4112}
4113
4114
4115/**
4116 * Clears the NMI-window exiting control in the VMCS.
4117 *
4118 * @param pVCpu The cross context virtual CPU structure.
4119 * @param pVmcsInfo The VMCS info. object.
4120 */
4121DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4122{
4123 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4124 {
4125 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4126 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4127 AssertRC(rc);
4128 }
4129}
4130
4131
4132/**
4133 * Injects an event into the guest upon VM-entry by updating the relevant fields
4134 * in the VM-entry area in the VMCS.
4135 *
4136 * @returns Strict VBox status code (i.e. informational status codes too).
4137 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4138 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4139 *
4140 * @param pVCpu The cross context virtual CPU structure.
4141 * @param   pVmcsInfo       The VMCS info. object.
     * @param   fIsNestedGuest  Flag whether the event is injected into a nested guest.
4142 * @param   pEvent          The event being injected.
4143 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4144 *                          will be updated if necessary. This must not be NULL.
4145 * @param fStepping Whether we're single-stepping guest execution and should
4146 * return VINF_EM_DBG_STEPPED if the event is injected
4147 * directly (registers modified by us, not by hardware on
4148 * VM-entry).
4149 */
4150static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent, bool fStepping,
4151 uint32_t *pfIntrState)
4152{
4153 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4154 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4155 Assert(pfIntrState);
4156
4157#ifdef IN_NEM_DARWIN
4158 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4159#endif
4160
4161 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4162 uint32_t u32IntInfo = pEvent->u64IntInfo;
4163 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4164 uint32_t const cbInstr = pEvent->cbInstr;
4165 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4166 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4167 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4168
4169#ifdef VBOX_STRICT
4170 /*
4171 * Validate the error-code-valid bit for hardware exceptions.
4172 * No error codes for exceptions in real-mode.
4173 *
4174 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4175 */
4176 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4177 && !CPUMIsGuestInRealModeEx(pCtx))
4178 {
4179 switch (uVector)
4180 {
4181 case X86_XCPT_PF:
4182 case X86_XCPT_DF:
4183 case X86_XCPT_TS:
4184 case X86_XCPT_NP:
4185 case X86_XCPT_SS:
4186 case X86_XCPT_GP:
4187 case X86_XCPT_AC:
4188 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4189 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4190 RT_FALL_THRU();
4191 default:
4192 break;
4193 }
4194 }
4195
4196 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4197 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4198 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4199#endif
4200
4201 RT_NOREF(uVector);
4202 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4203 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4204 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4205 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4206 {
4207 Assert(uVector <= X86_XCPT_LAST);
4208 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4209 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4210 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4211 }
4212 else
4213 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4214
4215 /*
4216 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4217 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4218 * interrupt handler in the (real-mode) guest.
4219 *
4220 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4221 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4222 */
4223 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4224 {
4225#ifndef IN_NEM_DARWIN
4226 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4227#endif
4228 {
4229 /*
4230 * For CPUs with unrestricted guest execution enabled and with the guest
4231 * in real-mode, we must not set the deliver-error-code bit.
4232 *
4233 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4234 */
4235 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4236 }
4237#ifndef IN_NEM_DARWIN
4238 else
4239 {
4240 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4241 Assert(PDMVmmDevHeapIsEnabled(pVM));
4242 Assert(pVM->hm.s.vmx.pRealModeTSS);
4243 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4244
4245 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4246 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4247 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4248 AssertRCReturn(rc2, rc2);
4249
4250 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4251 size_t const cbIdtEntry = sizeof(X86IDTR16);
4252 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4253 {
4254 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4255 if (uVector == X86_XCPT_DF)
4256 return VINF_EM_RESET;
4257
4258 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4259 No error codes for exceptions in real-mode. */
4260 if (uVector == X86_XCPT_GP)
4261 {
4262 uint32_t const uXcptDfInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4263 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4264 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4265 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4266 HMEVENT EventXcptDf;
4267 RT_ZERO(EventXcptDf);
4268 EventXcptDf.u64IntInfo = uXcptDfInfo;
4269 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptDf, fStepping, pfIntrState);
4270 }
4271
4272 /*
4273 * If we're injecting an event with no valid IDT entry, inject a #GP.
4274 * No error codes for exceptions in real-mode.
4275 *
4276 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4277 */
4278 uint32_t const uXcptGpInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4279 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4280 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4281 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4282 HMEVENT EventXcptGp;
4283 RT_ZERO(EventXcptGp);
4284 EventXcptGp.u64IntInfo = uXcptGpInfo;
4285 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptGp, fStepping, pfIntrState);
4286 }
4287
4288 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4289 uint16_t uGuestIp = pCtx->ip;
4290 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4291 {
4292 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4293                /* #BP and #OF are both benign traps; we need to resume at the next instruction. */
4294 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4295 }
4296 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4297 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4298
4299 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4300 X86IDTR16 IdtEntry;
4301 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4302 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4303 AssertRCReturn(rc2, rc2);
4304
4305 /* Construct the stack frame for the interrupt/exception handler. */
4306 VBOXSTRICTRC rcStrict;
4307 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
4308 if (rcStrict == VINF_SUCCESS)
4309 {
4310 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4311 if (rcStrict == VINF_SUCCESS)
4312 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4313 }
4314
4315 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4316 if (rcStrict == VINF_SUCCESS)
4317 {
4318 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4319 pCtx->rip = IdtEntry.offSel;
4320 pCtx->cs.Sel = IdtEntry.uSel;
4321 pCtx->cs.ValidSel = IdtEntry.uSel;
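                /* cbIdtEntry is sizeof(X86IDTR16) == 4, so the shift below is the usual real-mode
                   selector-to-base conversion (base = selector * 16). */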
4322 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4323 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4324 && uVector == X86_XCPT_PF)
4325 pCtx->cr2 = GCPtrFault;
4326
4327 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4328 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4329 | HM_CHANGED_GUEST_RSP);
4330
4331 /*
4332 * If we delivered a hardware exception (other than an NMI) and if there was
4333 * block-by-STI in effect, we should clear it.
4334 */
4335 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4336 {
4337 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4338 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4339 Log4Func(("Clearing inhibition due to STI\n"));
4340 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4341 }
4342
4343 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4344 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4345
4346 /*
4347 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4348 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4349 */
4350 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4351
4352 /*
4353 * If we eventually support nested-guest execution without unrestricted guest execution,
4354 * we should set fInterceptEvents here.
4355 */
4356 Assert(!fIsNestedGuest);
4357
4358 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4359 if (fStepping)
4360 rcStrict = VINF_EM_DBG_STEPPED;
4361 }
4362 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4363 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4364 return rcStrict;
4365 }
4366#else
4367 RT_NOREF(pVmcsInfo);
4368#endif
4369 }
4370
4371 /*
4372 * Validate.
4373 */
4374 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4375 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4376
4377 /*
4378 * Inject the event into the VMCS.
4379 */
4380 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4381 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4382 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4383 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4384 AssertRC(rc);
4385
4386 /*
4387 * Update guest CR2 if this is a page-fault.
4388 */
4389 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4390 pCtx->cr2 = GCPtrFault;
4391
4392 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4393 return VINF_SUCCESS;
4394}
4395
4396
4397/**
4398 * Evaluates the event to be delivered to the guest and sets it as the pending
4399 * event.
4400 *
4401 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4402 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4403 * NOT restore these force-flags.
4404 *
4405 * @returns Strict VBox status code (i.e. informational status codes too).
4406 * @param pVCpu The cross context virtual CPU structure.
4407 * @param pVmcsInfo The VMCS information structure.
4408 * @param   fIsNestedGuest  Flag whether the evaluation happens for a nested guest.
4409 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4410 */
4411static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4412{
4413 Assert(pfIntrState);
4414 Assert(!TRPMHasTrap(pVCpu));
4415
4416 /*
4417 * Compute/update guest-interruptibility state related FFs.
4418 * The FFs will be used below while evaluating events to be injected.
4419 */
4420 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4421
4422 /*
4423 * Evaluate if a new event needs to be injected.
4424 * An event that's already pending has already performed all necessary checks.
4425 */
4426 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4427 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
4428 {
4429 /** @todo SMI. SMIs take priority over NMIs. */
4430
4431 /*
4432 * NMIs.
4433 * NMIs take priority over external interrupts.
4434 */
4435#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4436 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4437#endif
4438 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4439 {
4440 /*
4441 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4442 *
4443 * For a nested-guest, the FF always indicates the outer guest's ability to
4444 * receive an NMI while the guest-interruptibility state bit depends on whether
4445 * the nested-hypervisor is using virtual-NMIs.
4446 */
4447 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
4448 {
4449#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4450 if ( fIsNestedGuest
4451 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4452 return IEMExecVmxVmexitXcptNmi(pVCpu);
4453#endif
4454 vmxHCSetPendingXcptNmi(pVCpu);
4455 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4456 Log4Func(("NMI pending injection\n"));
4457
4458 /* We've injected the NMI, bail. */
4459 return VINF_SUCCESS;
4460 }
4461 else if (!fIsNestedGuest)
4462 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4463 }
4464
4465 /*
4466 * External interrupts (PIC/APIC).
4467 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4468 * We cannot re-request the interrupt from the controller again.
4469 */
4470 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4471 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4472 {
4473 Assert(!DBGFIsStepping(pVCpu));
4474 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4475 AssertRC(rc);
4476
4477 /*
4478             * We must not check EFLAGS directly when executing a nested-guest; use
4479             * CPUMIsGuestPhysIntrEnabled() instead, as EFLAGS.IF does not control the blocking of
4480 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4481 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4482 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4483 *
4484 * See Intel spec. 25.4.1 "Event Blocking".
4485 */
4486 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4487 {
4488#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4489 if ( fIsNestedGuest
4490 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4491 {
4492 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4493 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4494 return rcStrict;
4495 }
4496#endif
4497 uint8_t u8Interrupt;
4498 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4499 if (RT_SUCCESS(rc))
4500 {
4501#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4502 if ( fIsNestedGuest
4503 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4504 {
4505 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
4506 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4507 return rcStrict;
4508 }
4509#endif
4510 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4511 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
4512 }
4513 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4514 {
4515 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
4516
4517 if ( !fIsNestedGuest
4518 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
4519 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
4520 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
4521
4522 /*
4523 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4524 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4525 * need to re-set this force-flag here.
4526 */
4527 }
4528 else
4529 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4530
4531 /* We've injected the interrupt or taken necessary action, bail. */
4532 return VINF_SUCCESS;
4533 }
4534 if (!fIsNestedGuest)
4535 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4536 }
4537 }
4538 else if (!fIsNestedGuest)
4539 {
4540 /*
4541 * An event is being injected or we are in an interrupt shadow. Check if another event is
4542 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
4543 * the pending event.
4544 */
4545 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4546 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4547 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4548 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4549 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4550 }
4551 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
4552
4553 return VINF_SUCCESS;
4554}
4555
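/*
 * Rough sketch of the priority order implemented by vmxHCEvaluatePendingEvent() above
 * (illustrative comment only; SMIs are not implemented yet, see the @todo in the function):
 *
 *   if no event is pending and interrupts are not inhibited:
 *       1. SMI (todo)
 *       2. NMI: nested-guest VM-exit, or mark an NMI pending, or request NMI-window exiting
 *       3. External interrupt (PIC/APIC): nested-guest VM-exit, mark the interrupt pending,
 *          raise the TPR threshold, or request interrupt-window exiting
 *   otherwise:
 *       request NMI-/interrupt-window exiting so we get a VM-exit once the guest can accept the event
 */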
4556
4557/**
4558 * Injects any pending events into the guest if the guest is in a state to
4559 * receive them.
4560 *
4561 * @returns Strict VBox status code (i.e. informational status codes too).
4562 * @param pVCpu The cross context virtual CPU structure.
4563 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
4564 * @param fIntrState The VT-x guest-interruptibility state.
4565 * @param fStepping Whether we are single-stepping the guest using the
4566 * hypervisor debugger and should return
4567 * VINF_EM_DBG_STEPPED if the event was dispatched
4568 * directly.
4569 */
4570static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t fIntrState, bool fStepping)
4571{
4572 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
4573#ifndef IN_NEM_DARWIN
4574 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4575#endif
4576
4577#ifdef VBOX_STRICT
4578 /*
4579 * Verify guest-interruptibility state.
4580 *
4581 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
4582 * since injecting an event may modify the interruptibility state and we must thus always
4583 * use fIntrState.
4584 */
4585 {
4586 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
4587 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
4588 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
4589 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
4590 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
4591 Assert(!TRPMHasTrap(pVCpu));
4592 NOREF(fBlockMovSS); NOREF(fBlockSti);
4593 }
4594#endif
4595
4596 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4597 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
4598 {
4599 /*
4600 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
4601 * pending even while injecting an event and in this case, we want a VM-exit as soon as
4602 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
4603 *
4604 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
4605 */
4606 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
4607#ifdef VBOX_STRICT
4608 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4609 {
4610 Assert(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
4611 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4612 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4613 }
4614 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
4615 {
4616 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
4617 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4618 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4619 }
4620#endif
4621 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
4622 uIntType));
4623
4624 /*
4625 * Inject the event and get any changes to the guest-interruptibility state.
4626 *
4627 * The guest-interruptibility state may need to be updated if we inject the event
4628 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
4629 */
4630 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
4631 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
4632
4633 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4634 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
4635 else
4636 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
4637 }
4638
4639 /*
4640 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
4641     * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
4642 */
4643 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
4644 && !fIsNestedGuest)
4645 {
4646 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
4647
4648 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4649 {
4650 /*
4651 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
4652 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
4653 */
4654 Assert(!DBGFIsStepping(pVCpu));
4655 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_TF);
4656 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
4657 AssertRC(rc);
4658 }
4659 else
4660 {
4661 /*
4662 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
4663 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
4664             * we take care of this case in vmxHCExportSharedDebugState, as well as the case where
4665             * we use MTF, so just make sure it's called before executing guest code.
4666 */
4667 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
4668 }
4669 }
4670    /* else: for nested-guests, this is currently handled while merging VMCS controls. */
4671
4672 /*
4673 * Finally, update the guest-interruptibility state.
4674 *
4675 * This is required for the real-on-v86 software interrupt injection, for
4676 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
4677 */
4678 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
4679 AssertRC(rc);
4680
4681 /*
4682 * There's no need to clear the VM-entry interruption-information field here if we're not
4683 * injecting anything. VT-x clears the valid bit on every VM-exit.
4684 *
4685 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
4686 */
4687
4688 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
4689 return rcStrict;
4690}
4691
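/*
 * Illustrative usage sketch (hypothetical snippet, not the exact caller, which lives in the
 * pre-run-guest code): evaluation decides what, if anything, becomes the pending event, and
 * injection then commits it to the VMCS together with the guest-interruptibility state.
 *
 *   uint32_t fIntrState = 0;
 *   VBOXSTRICTRC rcStrict = vmxHCEvaluatePendingEvent(pVCpu, pVmcsInfo, fIsNestedGuest, &fIntrState);
 *   if (rcStrict == VINF_SUCCESS)
 *       rcStrict = vmxHCInjectPendingEvent(pVCpu, pVmcsInfo, fIsNestedGuest, fIntrState, fStepping);
 */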
4692
4693/**
4694 * Tries to determine what part of the guest-state VT-x has deemed as invalid
4695 * and update error record fields accordingly.
4696 *
4697 * @returns VMX_IGS_* error codes.
4698 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
4699 * wrong with the guest state.
4700 *
4701 * @param pVCpu The cross context virtual CPU structure.
4702 * @param pVmcsInfo The VMCS info. object.
4703 *
4704 * @remarks This function assumes our cache of the VMCS controls
4705 *          is valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
4706 */
4707static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
4708{
4709#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
4710#define HMVMX_CHECK_BREAK(expr, err) do { \
4711 if (!(expr)) { uError = (err); break; } \
4712 } while (0)
4713
4714 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4715 uint32_t uError = VMX_IGS_ERROR;
4716 uint32_t u32IntrState = 0;
4717#ifndef IN_NEM_DARWIN
4718 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4719 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
4720#else
4721 bool const fUnrestrictedGuest = true;
4722#endif
4723 do
4724 {
4725 int rc;
4726
4727 /*
4728 * Guest-interruptibility state.
4729 *
4730         * Read this first so that even if a check that doesn't itself require the
4731         * guest-interruptibility state fails, the value recorded for diagnostics below
4732         * still reflects the correct VMCS contents and avoids further confusion.
4733 */
4734 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
4735 AssertRC(rc);
4736
4737 uint32_t u32Val;
4738 uint64_t u64Val;
4739
4740 /*
4741 * CR0.
4742 */
4743 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4744 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
4745 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
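        /*
         * For reference (partially answers the @todo above): a CR0 bit must be 1 if it is 1 in the
         * FIXED0 MSR and must be 0 if it is 0 in the FIXED1 MSR. Hence fSetCr0 (FIXED0 & FIXED1) is
         * the mandatory-one mask and fZapCr0 (FIXED0 | FIXED1) is the allowed-one mask; any bit clear
         * in fZapCr0 must be clear in the guest value. E.g. with the typical values FIXED0=0x80000021
         * (PG, NE, PE) and FIXED1=0xffffffff this gives fSetCr0=0x80000021 and fZapCr0=0xffffffff.
         * The AND/OR are largely defensive, since a must-be-one bit is by definition also an
         * allowed-one bit. The same reasoning applies to the CR4 fixed bits below.
         */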
4746 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
4747 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
4748 if (fUnrestrictedGuest)
4749 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
4750
4751 uint64_t u64GuestCr0;
4752 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
4753 AssertRC(rc);
4754 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
4755 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
4756 if ( !fUnrestrictedGuest
4757 && (u64GuestCr0 & X86_CR0_PG)
4758 && !(u64GuestCr0 & X86_CR0_PE))
4759 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
4760
4761 /*
4762 * CR4.
4763 */
4764 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4765 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
4766 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
4767
4768 uint64_t u64GuestCr4;
4769 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
4770 AssertRC(rc);
4771 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
4772 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
4773
4774 /*
4775 * IA32_DEBUGCTL MSR.
4776 */
4777 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
4778 AssertRC(rc);
4779 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4780 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
4781 {
4782 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
4783 }
4784 uint64_t u64DebugCtlMsr = u64Val;
4785
4786#ifdef VBOX_STRICT
4787 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
4788 AssertRC(rc);
4789 Assert(u32Val == pVmcsInfo->u32EntryCtls);
4790#endif
4791 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4792
4793 /*
4794 * RIP and RFLAGS.
4795 */
4796 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
4797 AssertRC(rc);
4798        /* pCtx->rip can differ from the value in the VMCS (e.g. after running guest code and taking VM-exits that don't update it). */
4799 if ( !fLongModeGuest
4800 || !pCtx->cs.Attr.n.u1Long)
4801 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
4802 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
4803 * must be identical if the "IA-32e mode guest" VM-entry
4804 * control is 1 and CS.L is 1. No check applies if the
4805 * CPU supports 64 linear-address bits. */
4806
4807 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
4808 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
4809 AssertRC(rc);
4810 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
4811 VMX_IGS_RFLAGS_RESERVED);
4812 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
4813 uint32_t const u32Eflags = u64Val;
4814
4815 if ( fLongModeGuest
4816 || ( fUnrestrictedGuest
4817 && !(u64GuestCr0 & X86_CR0_PE)))
4818 {
4819 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
4820 }
4821
4822 uint32_t u32EntryInfo;
4823 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
4824 AssertRC(rc);
4825 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
4826 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
4827
4828 /*
4829 * 64-bit checks.
4830 */
4831 if (fLongModeGuest)
4832 {
4833 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
4834 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
4835 }
4836
4837 if ( !fLongModeGuest
4838 && (u64GuestCr4 & X86_CR4_PCIDE))
4839 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
4840
4841 /** @todo CR3 field must be such that bits 63:52 and bits in the range
4842 * 51:32 beyond the processor's physical-address width are 0. */
4843
4844 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4845 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
4846 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
4847
4848#ifndef IN_NEM_DARWIN
4849 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
4850 AssertRC(rc);
4851 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
4852
4853 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
4854 AssertRC(rc);
4855 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
4856#endif
4857
4858 /*
4859 * PERF_GLOBAL MSR.
4860 */
4861 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
4862 {
4863 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
4864 AssertRC(rc);
4865 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
4866 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
4867 }
4868
4869 /*
4870 * PAT MSR.
4871 */
4872 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
4873 {
4874 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
4875 AssertRC(rc);
4876 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
4877 for (unsigned i = 0; i < 8; i++)
4878 {
4879 uint8_t u8Val = (u64Val & 0xff);
4880 if ( u8Val != 0 /* UC */
4881 && u8Val != 1 /* WC */
4882 && u8Val != 4 /* WT */
4883 && u8Val != 5 /* WP */
4884 && u8Val != 6 /* WB */
4885 && u8Val != 7 /* UC- */)
4886 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
4887 u64Val >>= 8;
4888 }
4889 }
4890
4891 /*
4892 * EFER MSR.
4893 */
4894 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
4895 {
4896 Assert(g_fHmVmxSupportsVmcsEfer);
4897 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
4898 AssertRC(rc);
4899 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
4900 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
4901 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
4902 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
4903 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
4904 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
4905 * iemVmxVmentryCheckGuestState(). */
4906 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4907 || !(u64GuestCr0 & X86_CR0_PG)
4908 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
4909 VMX_IGS_EFER_LMA_LME_MISMATCH);
4910 }
4911
4912 /*
4913 * Segment registers.
4914 */
4915 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
4916 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
4917 if (!(u32Eflags & X86_EFL_VM))
4918 {
4919 /* CS */
4920 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
4921 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
4922 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
4923 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
4924 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4925 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
4926 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4927 /* CS cannot be loaded with NULL in protected mode. */
4928 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
4929 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
4930 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
4931 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
4932 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
4933 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
4934 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
4935 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
4936 else
4937 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
4938
4939 /* SS */
4940 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4941 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
4942 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
4943 if ( !(pCtx->cr0 & X86_CR0_PE)
4944 || pCtx->cs.Attr.n.u4Type == 3)
4945 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
4946
4947 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
4948 {
4949 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
4950 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
4951 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
4952 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
4953 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4954 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4955 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
4956 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4957 }
4958
4959 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
4960 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4961 {
4962 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
4963 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
4964 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4965 || pCtx->ds.Attr.n.u4Type > 11
4966 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4967 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
4968 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
4969 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4970 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4971 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
4972 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4973 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4974 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
4975 }
4976 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4977 {
4978 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
4979 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
4980 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4981 || pCtx->es.Attr.n.u4Type > 11
4982 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4983 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
4984 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
4985 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
4986 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
4987 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
4988 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
4989 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4990 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
4991 }
4992 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
4993 {
4994 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
4995 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
4996 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4997 || pCtx->fs.Attr.n.u4Type > 11
4998 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
4999 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5000 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5001 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5002 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5003 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5004 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5005 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5006 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5007 }
5008 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5009 {
5010 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5011 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5012 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5013 || pCtx->gs.Attr.n.u4Type > 11
5014 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5015 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5016 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5017 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5018 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5019 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5020 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5021 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5022 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5023 }
5024 /* 64-bit capable CPUs. */
5025 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5026 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5027 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5028 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5029 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5030 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5031 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5032 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5033 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5034 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5035 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5036 }
5037 else
5038 {
5039 /* V86 mode checks. */
5040 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5041 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5042 {
5043 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5044 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5045 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5046 }
5047 else
5048 {
5049 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5050 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5051 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5052 }
5053
5054 /* CS */
5055 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5056 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5057 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5058 /* SS */
5059 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5060 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5061 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5062 /* DS */
5063 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5064 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5065 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5066 /* ES */
5067 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5068 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5069 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5070 /* FS */
5071 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5072 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5073 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5074 /* GS */
5075 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5076 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5077 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5078 /* 64-bit capable CPUs. */
5079 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5080 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5081 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5082 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5083 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5084 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5085 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5086 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5087 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5088 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5089 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5090 }
5091
5092 /*
5093 * TR.
5094 */
5095 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5096 /* 64-bit capable CPUs. */
5097 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5098 if (fLongModeGuest)
5099 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5100 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5101 else
5102 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5103 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5104 VMX_IGS_TR_ATTR_TYPE_INVALID);
5105 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5106 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5107 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5108 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5109 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5110 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5111 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5112 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5113
5114 /*
5115 * GDTR and IDTR (64-bit capable checks).
5116 */
5117 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5118 AssertRC(rc);
5119 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5120
5121 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5122 AssertRC(rc);
5123 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5124
5125 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5126 AssertRC(rc);
5127 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5128
5129 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5130 AssertRC(rc);
5131 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5132
5133 /*
5134 * Guest Non-Register State.
5135 */
5136 /* Activity State. */
5137 uint32_t u32ActivityState;
5138 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5139 AssertRC(rc);
5140 HMVMX_CHECK_BREAK( !u32ActivityState
5141 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5142 VMX_IGS_ACTIVITY_STATE_INVALID);
5143 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5144 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5145
5146 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5147 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5148 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5149
5150 /** @todo Activity state and injecting interrupts. Left as a todo since we
5151         *        currently don't use activity states other than ACTIVE. */
5152
5153 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5154 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5155
5156 /* Guest interruptibility-state. */
5157 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5158 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5159 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5160 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5161 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5162 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5163 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5164 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5165 {
5166 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5167 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5168 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5169 }
5170 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5171 {
5172 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5173 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5174 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5175 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5176 }
5177 /** @todo Assumes the processor is not in SMM. */
5178 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5179 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5180 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5181 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5182 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5183 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5184 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5185 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5186
5187 /* Pending debug exceptions. */
5188 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5189 AssertRC(rc);
5190 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5191 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5192 u32Val = u64Val; /* For pending debug exceptions checks below. */
5193
5194 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5195 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5196 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5197 {
5198 if ( (u32Eflags & X86_EFL_TF)
5199 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5200 {
5201 /* Bit 14 is PendingDebug.BS. */
5202 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5203 }
5204 if ( !(u32Eflags & X86_EFL_TF)
5205 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5206 {
5207 /* Bit 14 is PendingDebug.BS. */
5208 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5209 }
5210 }
5211
5212#ifndef IN_NEM_DARWIN
5213 /* VMCS link pointer. */
5214 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5215 AssertRC(rc);
5216 if (u64Val != UINT64_C(0xffffffffffffffff))
5217 {
5218 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5219 /** @todo Bits beyond the processor's physical-address width MBZ. */
5220 /** @todo SMM checks. */
5221 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5222 Assert(pVmcsInfo->pvShadowVmcs);
5223 VMXVMCSREVID VmcsRevId;
5224 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5225 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5226 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5227 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5228 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5229 }
5230
5231 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5232 * not using nested paging? */
5233 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5234 && !fLongModeGuest
5235 && CPUMIsGuestInPAEModeEx(pCtx))
5236 {
5237 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5238 AssertRC(rc);
5239 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5240
5241 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5242 AssertRC(rc);
5243 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5244
5245 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5246 AssertRC(rc);
5247 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5248
5249 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5250 AssertRC(rc);
5251 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5252 }
5253#endif
5254
5255 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5256 if (uError == VMX_IGS_ERROR)
5257 uError = VMX_IGS_REASON_NOT_FOUND;
5258 } while (0);
5259
5260 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5261 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5262 return uError;
5263
5264#undef HMVMX_ERROR_BREAK
5265#undef HMVMX_CHECK_BREAK
5266}
5267/** @} */
5268
5269
5270#ifndef HMVMX_USE_FUNCTION_TABLE
5271/**
5272 * Handles a guest VM-exit from hardware-assisted VMX execution.
5273 *
5274 * @returns Strict VBox status code (i.e. informational status codes too).
5275 * @param pVCpu The cross context virtual CPU structure.
5276 * @param pVmxTransient The VMX-transient structure.
5277 */
5278DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5279{
5280#ifdef DEBUG_ramshankar
5281# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5282 do { \
5283 if (a_fSave != 0) \
5284 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); \
5285 VBOXSTRICTRC rcStrict = a_CallExpr; \
5286 if (a_fSave != 0) \
5287 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5288 return rcStrict; \
5289 } while (0)
5290#else
5291# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5292#endif
5293 uint32_t const uExitReason = pVmxTransient->uExitReason;
5294 switch (uExitReason)
5295 {
5296 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5297 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5298 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5299 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5300 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5301 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5302 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5303 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5304 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5305 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5306 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5307 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5308 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5309 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5310 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5311 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5312 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5313 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5314 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5315 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5316 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5317 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5318 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5319 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5320 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5321 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5322 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5323 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5324 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5325 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5326#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5327 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5328 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5329 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5330 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5331 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5332        case VMX_EXIT_VMRESUME:                VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5333        case VMX_EXIT_VMWRITE:                 VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5334 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5335 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5336 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5337 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient));
5338#else
5339 case VMX_EXIT_VMCLEAR:
5340 case VMX_EXIT_VMLAUNCH:
5341 case VMX_EXIT_VMPTRLD:
5342 case VMX_EXIT_VMPTRST:
5343 case VMX_EXIT_VMREAD:
5344 case VMX_EXIT_VMRESUME:
5345 case VMX_EXIT_VMWRITE:
5346 case VMX_EXIT_VMXOFF:
5347 case VMX_EXIT_VMXON:
5348 case VMX_EXIT_INVVPID:
5349 case VMX_EXIT_INVEPT:
5350 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5351#endif
5352
5353 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5354 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5355 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5356
5357 case VMX_EXIT_INIT_SIGNAL:
5358 case VMX_EXIT_SIPI:
5359 case VMX_EXIT_IO_SMI:
5360 case VMX_EXIT_SMI:
5361 case VMX_EXIT_ERR_MSR_LOAD:
5362 case VMX_EXIT_ERR_MACHINE_CHECK:
5363 case VMX_EXIT_PML_FULL:
5364 case VMX_EXIT_VIRTUALIZED_EOI:
5365 case VMX_EXIT_GDTR_IDTR_ACCESS:
5366 case VMX_EXIT_LDTR_TR_ACCESS:
5367 case VMX_EXIT_APIC_WRITE:
5368 case VMX_EXIT_RDRAND:
5369 case VMX_EXIT_RSM:
5370 case VMX_EXIT_VMFUNC:
5371 case VMX_EXIT_ENCLS:
5372 case VMX_EXIT_RDSEED:
5373 case VMX_EXIT_XSAVES:
5374 case VMX_EXIT_XRSTORS:
5375 case VMX_EXIT_UMWAIT:
5376 case VMX_EXIT_TPAUSE:
5377 case VMX_EXIT_LOADIWKEY:
5378 default:
5379 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5380 }
5381#undef VMEXIT_CALL_RET
5382}
5383#endif /* !HMVMX_USE_FUNCTION_TABLE */
5384
5385
5386#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5387/**
5388 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5389 *
5390 * @returns Strict VBox status code (i.e. informational status codes too).
5391 * @param pVCpu The cross context virtual CPU structure.
5392 * @param pVmxTransient The VMX-transient structure.
5393 */
5394DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5395{
5396 uint32_t const uExitReason = pVmxTransient->uExitReason;
5397 switch (uExitReason)
5398 {
5399 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
5400 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
5401 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5402 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5403 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5404
5405 /*
5406 * We shouldn't direct host physical interrupts to the nested-guest.
5407 */
5408 case VMX_EXIT_EXT_INT:
5409 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5410
5411 /*
5412 * Instructions that cause VM-exits unconditionally or the condition is
5413         * always taken solely from the nested hypervisor (meaning if the VM-exit
5414 * happens, it's guaranteed to be a nested-guest VM-exit).
5415 *
5416 * - Provides VM-exit instruction length ONLY.
5417 */
5418 case VMX_EXIT_CPUID: /* Unconditional. */
5419 case VMX_EXIT_VMCALL:
5420 case VMX_EXIT_GETSEC:
5421 case VMX_EXIT_INVD:
5422 case VMX_EXIT_XSETBV:
5423 case VMX_EXIT_VMLAUNCH:
5424 case VMX_EXIT_VMRESUME:
5425 case VMX_EXIT_VMXOFF:
5426 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5427 case VMX_EXIT_VMFUNC:
5428 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5429
5430 /*
5431 * Instructions that cause VM-exits unconditionally or the condition is
5432         * always taken solely from the nested hypervisor (meaning if the VM-exit
5433 * happens, it's guaranteed to be a nested-guest VM-exit).
5434 *
5435 * - Provides VM-exit instruction length.
5436 * - Provides VM-exit information.
5437 * - Optionally provides Exit qualification.
5438 *
5439 * Since Exit qualification is 0 for all VM-exits where it is not
5440 * applicable, reading and passing it to the guest should produce
5441 * defined behavior.
5442 *
5443 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5444 */
5445 case VMX_EXIT_INVEPT: /* Unconditional. */
5446 case VMX_EXIT_INVVPID:
5447 case VMX_EXIT_VMCLEAR:
5448 case VMX_EXIT_VMPTRLD:
5449 case VMX_EXIT_VMPTRST:
5450 case VMX_EXIT_VMXON:
5451 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5452 case VMX_EXIT_LDTR_TR_ACCESS:
5453 case VMX_EXIT_RDRAND:
5454 case VMX_EXIT_RDSEED:
5455 case VMX_EXIT_XSAVES:
5456 case VMX_EXIT_XRSTORS:
5457 case VMX_EXIT_UMWAIT:
5458 case VMX_EXIT_TPAUSE:
5459 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5460
5461 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5462 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5463 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5464 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5465 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
5466 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
5467 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
5468 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
5469 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
5470 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
5471 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
5472 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
5473 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
5474 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
5475 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
5476 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
5477 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
5478 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
5479 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
5480
5481 case VMX_EXIT_PREEMPT_TIMER:
5482 {
5483 /** @todo NSTVMX: Preempt timer. */
5484 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
5485 }
5486
5487 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
5488 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
5489
5490 case VMX_EXIT_VMREAD:
5491 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
5492
5493 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
5494 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
5495
5496 case VMX_EXIT_INIT_SIGNAL:
5497 case VMX_EXIT_SIPI:
5498 case VMX_EXIT_IO_SMI:
5499 case VMX_EXIT_SMI:
5500 case VMX_EXIT_ERR_MSR_LOAD:
5501 case VMX_EXIT_ERR_MACHINE_CHECK:
5502 case VMX_EXIT_PML_FULL:
5503 case VMX_EXIT_RSM:
5504 default:
5505 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5506 }
5507}
5508#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5509
5510
5511/** @name VM-exit helpers.
5512 * @{
5513 */
5514/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5515/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
5516/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5517
5518/** Macro for VM-exits called unexpectedly. */
5519#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
5520 do { \
5521 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
5522 return VERR_VMX_UNEXPECTED_EXIT; \
5523 } while (0)
5524
5525#ifdef VBOX_STRICT
5526# ifndef IN_NEM_DARWIN
5527/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
5528# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
5529 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
5530
5531# define HMVMX_ASSERT_PREEMPT_CPUID() \
5532 do { \
5533 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
5534 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
5535 } while (0)
5536
5537# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5538 do { \
5539 AssertPtr((a_pVCpu)); \
5540 AssertPtr((a_pVmxTransient)); \
5541 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
5542 Assert((a_pVmxTransient)->pVmcsInfo); \
5543 Assert(ASMIntAreEnabled()); \
5544 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5545 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
5546 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5547 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5548 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
5549 HMVMX_ASSERT_PREEMPT_CPUID(); \
5550 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5551 } while (0)
5552# else
5553# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
5554# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
5555# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5556 do { \
5557 AssertPtr((a_pVCpu)); \
5558 AssertPtr((a_pVmxTransient)); \
5559 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
5560 Assert((a_pVmxTransient)->pVmcsInfo); \
5561 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5562 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5563 } while (0)
5564# endif
5565
5566# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5567 do { \
5568 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
5569 Assert((a_pVmxTransient)->fIsNestedGuest); \
5570 } while (0)
5571
5572# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5573 do { \
5574 Log4Func(("\n")); \
5575 } while (0)
5576#else
5577# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5578 do { \
5579 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5580 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
5581 } while (0)
5582
5583# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5584 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
5585
5586# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
5587#endif
5588
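/*
 * Illustrative sketch only (hypothetical handler name; the real handlers follow further below):
 * a typical VM-exit handler built on the validation macros above starts roughly like this:
 *
 *   static VBOXSTRICTRC vmxHCExitSomeInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
 *   {
 *       HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
 *       ... handle the exit, typically advancing RIP past emulated instructions ...
 *   }
 */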
5589#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5590/** Macro that performs the necessary privilege checks and handles intercepted VM-exits for
5591 * guests that attempted to execute a VMX instruction. */
5592# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
5593 do \
5594 { \
5595 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
5596 if (rcStrictTmp == VINF_SUCCESS) \
5597 { /* likely */ } \
5598 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5599 { \
5600 Assert((a_pVCpu)->hm.s.Event.fPending); \
5601 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
5602 return VINF_SUCCESS; \
5603 } \
5604 else \
5605 { \
5606 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
5607 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
5608 } \
5609 } while (0)
5610
5611/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
5612# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
5613 do \
5614 { \
5615 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
5616 (a_pGCPtrEffAddr)); \
5617 if (rcStrictTmp == VINF_SUCCESS) \
5618 { /* likely */ } \
5619 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5620 { \
5621 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
5622 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
5623 NOREF(uXcptTmp); \
5624 return VINF_SUCCESS; \
5625 } \
5626 else \
5627 { \
5628 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
5629 return rcStrictTmp; \
5630 } \
5631 } while (0)
5632#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5633
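/*
 * Illustrative sketch only: handlers for VMX instructions executed by the guest (VMREAD, VMPTRLD,
 * etc.) are expected to use the two macros above roughly in this order; the access type and the
 * operand-related variable names shown here are merely indicative:
 *
 *   HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
 *   ...
 *   RTGCPTR GCPtrEffAddr;
 *   HMVMX_DECODE_MEM_OPERAND(pVCpu, uExitInstrInfo, uExitQual, VMXMEMACCESS_READ, &GCPtrEffAddr);
 *
 * i.e. the privilege/intercept checks come first, followed by decoding of any memory operand.
 */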
5634
5635/**
5636 * Advances the guest RIP by the specified number of bytes.
5637 *
5638 * @param pVCpu The cross context virtual CPU structure.
5639 * @param cbInstr Number of bytes to advance the RIP by.
5640 *
5641 * @remarks No-long-jump zone!!!
5642 */
5643DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
5644{
5645 /* Advance the RIP. */
5646 pVCpu->cpum.GstCtx.rip += cbInstr;
5647 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
5648
5649 /* Update interrupt inhibition. */
5650 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
5651 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
5652 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
5653}
5654
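/*
 * Note on the interrupt-inhibition update in vmxHCAdvanceGuestRipBy() above: the STI / MOV SS
 * shadow only blocks interrupts on the single instruction that immediately follows STI or MOV SS.
 * Once RIP has moved past the recorded inhibition PC (EMGetInhibitInterruptsPC), the
 * VMCPU_FF_INHIBIT_INTERRUPTS force-flag no longer applies and is therefore cleared.
 */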
5655
5656/**
5657 * Advances the guest RIP after reading it from the VMCS.
5658 *
5659 * @returns VBox status code, no informational status codes.
5660 * @param pVCpu The cross context virtual CPU structure.
5661 * @param pVmxTransient The VMX-transient structure.
5662 *
5663 * @remarks No-long-jump zone!!!
5664 */
5665static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5666{
5667 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
5668 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
5669 AssertRCReturn(rc, rc);
5670
5671 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
5672 return VINF_SUCCESS;
5673}
5674
5675
5676/**
5677 * Handle a condition that occurred while delivering an event through the guest or
5678 * nested-guest IDT.
5679 *
5680 * @returns Strict VBox status code (i.e. informational status codes too).
5681 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5682 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
5683 *          to continue execution of the guest which will deliver the \#DF.
5684 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5685 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
5686 *
5687 * @param pVCpu The cross context virtual CPU structure.
5688 * @param pVmxTransient The VMX-transient structure.
5689 *
5690 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
5691 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
5692 * is due to an EPT violation, PML full or SPP-related event.
5693 *
5694 * @remarks No-long-jump zone!!!
5695 */
5696static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5697{
5698 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
5699 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
5700 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5701 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5702 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5703 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
5704
5705 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5706 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5707 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
5708 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
5709 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
5710 {
5711 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
5712 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
5713
5714 /*
5715 * If the event was a software interrupt (generated with INT n) or a software exception
5716 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
5717 * can handle the VM-exit and continue guest execution which will re-execute the
5718 * instruction rather than re-injecting the exception, as that can cause premature
5719 * trips to ring-3 before injection and involve TRPM which currently has no way of
5720 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
5721 * the problem).
5722 */
5723 IEMXCPTRAISE enmRaise;
5724 IEMXCPTRAISEINFO fRaiseInfo;
5725 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5726 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5727 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
5728 {
5729 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
5730 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5731 }
5732 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
5733 {
5734 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
5735 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
5736 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
5737
5738 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
5739 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
5740
5741 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
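            /* For example: a #GP raised while delivering a #NP is a contributory-exception pair and
               yields IEMXCPTRAISE_DOUBLE_FAULT, whereas a #PF raised while delivering an external
               interrupt is flagged as IEMXCPTRAISEINFO_EXT_INT_PF and handled as a vectoring #PF below. */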
5742
5743 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
5744 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
5745 {
5746 pVmxTransient->fVectoringPF = true;
5747 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5748 }
5749 }
5750 else
5751 {
5752 /*
5753 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
5754 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
5755 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
5756 */
5757 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5758 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5759 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
5760 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5761 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5762 }
5763
5764 /*
5765 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
5766 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
5767 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
5768 * subsequent VM-entry would fail, see @bugref{7445}.
5769 *
5770 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
5771 */
5772 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5773 && enmRaise == IEMXCPTRAISE_PREV_EVENT
5774 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5775 && CPUMIsGuestNmiBlocking(pVCpu))
5776 {
5777 CPUMSetGuestNmiBlocking(pVCpu, false);
5778 }
5779
5780 switch (enmRaise)
5781 {
5782 case IEMXCPTRAISE_CURRENT_XCPT:
5783 {
5784 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
5785 Assert(rcStrict == VINF_SUCCESS);
5786 break;
5787 }
5788
5789 case IEMXCPTRAISE_PREV_EVENT:
5790 {
5791 uint32_t u32ErrCode;
5792 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
5793 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5794 else
5795 u32ErrCode = 0;
5796
5797 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
5798 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
5799 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */,
5800 u32ErrCode, pVCpu->cpum.GstCtx.cr2);
5801
5802 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5803 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
5804 Assert(rcStrict == VINF_SUCCESS);
5805 break;
5806 }
5807
5808 case IEMXCPTRAISE_REEXEC_INSTR:
5809 Assert(rcStrict == VINF_SUCCESS);
5810 break;
5811
5812 case IEMXCPTRAISE_DOUBLE_FAULT:
5813 {
5814 /*
5815 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
5816 * second #PF as a guest #PF (and not a shadow #PF) and it needs to be converted into a #DF.
5817 */
5818 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
5819 {
5820 pVmxTransient->fVectoringDoublePF = true;
5821 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5822 pVCpu->cpum.GstCtx.cr2));
5823 rcStrict = VINF_SUCCESS;
5824 }
5825 else
5826 {
5827 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
5828 vmxHCSetPendingXcptDF(pVCpu);
5829 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5830 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5831 rcStrict = VINF_HM_DOUBLE_FAULT;
5832 }
5833 break;
5834 }
5835
5836 case IEMXCPTRAISE_TRIPLE_FAULT:
5837 {
5838 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
5839 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5840 rcStrict = VINF_EM_RESET;
5841 break;
5842 }
5843
5844 case IEMXCPTRAISE_CPU_HANG:
5845 {
5846 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
5847 rcStrict = VERR_EM_GUEST_CPU_HANG;
5848 break;
5849 }
5850
5851 default:
5852 {
5853 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
5854 rcStrict = VERR_VMX_IPE_2;
5855 break;
5856 }
5857 }
5858 }
5859 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5860 && !CPUMIsGuestNmiBlocking(pVCpu))
5861 {
5862 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
5863 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
5864 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
5865 {
5866 /*
5867 * Execution of IRET caused a fault when NMI blocking was in effect (i.e we're in
5868 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5869 * that virtual NMIs remain blocked until the IRET execution is completed.
5870 *
5871 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
5872 */
5873 CPUMSetGuestNmiBlocking(pVCpu, true);
5874 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5875 }
5876 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5877 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5878 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5879 {
5880 /*
5881 * Execution of IRET caused an EPT violation, page-modification log-full event or
5882 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
5883 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5884 * that virtual NMIs remain blocked until the IRET execution is completed.
5885 *
5886 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
5887 */
5888 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
5889 {
5890 CPUMSetGuestNmiBlocking(pVCpu, true);
5891 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5892 }
5893 }
5894 }
5895
5896 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
5897 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
5898 return rcStrict;
5899}
5900
5901
5902#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5903/**
5904 * Performs the relevant VMX instruction checks for VM-exits that occurred due to the
5905 * guest attempting to execute a VMX instruction.
5906 *
5907 * @returns Strict VBox status code (i.e. informational status codes too).
5908 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5909 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
5910 *
5911 * @param pVCpu The cross context virtual CPU structure.
5912 * @param uExitReason The VM-exit reason.
5913 *
5914 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
5915 * @remarks No-long-jump zone!!!
5916 */
5917static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
5918{
5919 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
5920 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
5921
5922 /*
5923 * The physical CPU would have already checked the CPU mode/code segment.
5924 * We shall just assert here for paranoia.
5925 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
5926 */
5927 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
5928 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
5929 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
5930
5931 if (uExitReason == VMX_EXIT_VMXON)
5932 {
5933 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5934
5935 /*
5936 * We check CR4.VMXE because it is required to be always set while in VMX operation
5937 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
5938 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
5939 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
5940 */
5941 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
5942 {
5943 Log4Func(("CR4.VMXE is not set -> #UD\n"));
5944 vmxHCSetPendingXcptUD(pVCpu);
5945 return VINF_HM_PENDING_XCPT;
5946 }
5947 }
5948 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
5949 {
5950 /*
5951 * The guest has not entered VMX operation but attempted to execute a VMX instruction
5952 * (other than VMXON), so we need to raise a #UD.
5953 */
5954 Log4Func(("Not in VMX root mode -> #UD\n"));
5955 vmxHCSetPendingXcptUD(pVCpu);
5956 return VINF_HM_PENDING_XCPT;
5957 }
5958
5959 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
5960 return VINF_SUCCESS;
5961}
5962
5963
5964/**
5965 * Decodes the memory operand of an instruction that caused a VM-exit.
5966 *
5967 * The Exit qualification field provides the displacement field for memory
5968 * operand instructions, if any.
5969 *
5970 * @returns Strict VBox status code (i.e. informational status codes too).
5971 * @retval VINF_SUCCESS if the operand was successfully decoded.
5972 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
5973 * operand.
5974 * @param pVCpu The cross context virtual CPU structure.
5975 * @param uExitInstrInfo The VM-exit instruction information field.
5976 * @param GCPtrDisp The instruction displacement field, if any. For
5977 * RIP-relative addressing pass RIP + displacement here.
5978 * @param enmMemAccess The memory operand's access type (read or write).
5979 * @param pGCPtrMem Where to store the effective destination memory address.
5980 *
5981 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
5982 * virtual-8086 mode hence skips those checks while verifying if the
5983 * segment is valid.
5984 */
5985static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
5986 PRTGCPTR pGCPtrMem)
5987{
5988 Assert(pGCPtrMem);
5989 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
5990 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
5991 | CPUMCTX_EXTRN_CR0);
5992
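    /* Index 0..2 follows the VMX instruction-information address-size encoding (0 = 16-bit,
       1 = 32-bit, 2 = 64-bit); despite its name, s_auAccessSizeMasks holds access sizes in bytes. */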
5993 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
5994 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
5995 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
5996
5997 VMXEXITINSTRINFO ExitInstrInfo;
5998 ExitInstrInfo.u = uExitInstrInfo;
5999 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6000 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6001 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6002 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6003 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6004 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6005 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6006 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6007 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6008
6009 /*
6010 * Validate instruction information.
6011 * This shouldn't happen on real hardware, but the checks are useful while testing our nested hardware-virtualization code.
6012 */
6013 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6014 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6015 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6016 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6017 AssertLogRelMsgReturn(fIsMemOperand,
6018 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6019
6020 /*
6021 * Compute the complete effective address.
6022 *
6023 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6024 * See AMD spec. 4.5.2 "Segment Registers".
6025 */
6026 RTGCPTR GCPtrMem = GCPtrDisp;
6027 if (fBaseRegValid)
6028 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6029 if (fIdxRegValid)
6030 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6031
6032 RTGCPTR const GCPtrOff = GCPtrMem;
6033 if ( !fIsLongMode
6034 || iSegReg >= X86_SREG_FS)
6035 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6036 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
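    /* Illustrative example (hypothetical operands): with a 64-bit address size, base RBX, index RSI,
       scale 4 and displacement 0x10, GCPtrDisp arrives as 0x10 and the sum is 0x10 + RBX + (RSI << 2);
       in long mode only FS/GS segment bases are added before masking to the effective address size. */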
6037
6038 /*
6039 * Validate effective address.
6040 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6041 */
6042 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6043 Assert(cbAccess > 0);
6044 if (fIsLongMode)
6045 {
6046 if (X86_IS_CANONICAL(GCPtrMem))
6047 {
6048 *pGCPtrMem = GCPtrMem;
6049 return VINF_SUCCESS;
6050 }
6051
6052 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6053 * "Data Limit Checks in 64-bit Mode". */
6054 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6055 vmxHCSetPendingXcptGP(pVCpu, 0);
6056 return VINF_HM_PENDING_XCPT;
6057 }
6058
6059 /*
6060 * This is a watered down version of iemMemApplySegment().
6061 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6062 * and segment CPL/DPL checks are skipped.
6063 */
6064 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6065 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6066 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6067
6068 /* Check if the segment is present and usable. */
6069 if ( pSel->Attr.n.u1Present
6070 && !pSel->Attr.n.u1Unusable)
6071 {
6072 Assert(pSel->Attr.n.u1DescType);
6073 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6074 {
6075 /* Check permissions for the data segment. */
6076 if ( enmMemAccess == VMXMEMACCESS_WRITE
6077 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6078 {
6079 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6080 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6081 return VINF_HM_PENDING_XCPT;
6082 }
6083
6084 /* Check limits if it's a normal data segment. */
6085 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6086 {
6087 if ( GCPtrFirst32 > pSel->u32Limit
6088 || GCPtrLast32 > pSel->u32Limit)
6089 {
6090 Log4Func(("Data segment limit exceeded. "
6091 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6092 GCPtrLast32, pSel->u32Limit));
6093 if (iSegReg == X86_SREG_SS)
6094 vmxHCSetPendingXcptSS(pVCpu, 0);
6095 else
6096 vmxHCSetPendingXcptGP(pVCpu, 0);
6097 return VINF_HM_PENDING_XCPT;
6098 }
6099 }
6100 else
6101 {
6102 /* Check limits if it's an expand-down data segment.
6103 Note! The upper boundary is defined by the B bit, not the G bit! */
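                /* E.g. with u32Limit=0x0fff and the B bit set, the valid offset range is 0x1000 through 0xffffffff. */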
6104 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6105 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6106 {
6107 Log4Func(("Expand-down data segment limit exceeded. "
6108 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6109 GCPtrLast32, pSel->u32Limit));
6110 if (iSegReg == X86_SREG_SS)
6111 vmxHCSetPendingXcptSS(pVCpu, 0);
6112 else
6113 vmxHCSetPendingXcptGP(pVCpu, 0);
6114 return VINF_HM_PENDING_XCPT;
6115 }
6116 }
6117 }
6118 else
6119 {
6120 /* Check permissions for the code segment. */
6121 if ( enmMemAccess == VMXMEMACCESS_WRITE
6122 || ( enmMemAccess == VMXMEMACCESS_READ
6123 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6124 {
6125 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6126 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6127 vmxHCSetPendingXcptGP(pVCpu, 0);
6128 return VINF_HM_PENDING_XCPT;
6129 }
6130
6131 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6132 if ( GCPtrFirst32 > pSel->u32Limit
6133 || GCPtrLast32 > pSel->u32Limit)
6134 {
6135 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6136 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6137 if (iSegReg == X86_SREG_SS)
6138 vmxHCSetPendingXcptSS(pVCpu, 0);
6139 else
6140 vmxHCSetPendingXcptGP(pVCpu, 0);
6141 return VINF_HM_PENDING_XCPT;
6142 }
6143 }
6144 }
6145 else
6146 {
6147 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6148 vmxHCSetPendingXcptGP(pVCpu, 0);
6149 return VINF_HM_PENDING_XCPT;
6150 }
6151
6152 *pGCPtrMem = GCPtrMem;
6153 return VINF_SUCCESS;
6154}
6155#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6156
6157
6158/**
6159 * VM-exit helper for LMSW.
6160 */
6161static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6162{
6163 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6164 AssertRCReturn(rc, rc);
6165
6166 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6167 AssertMsg( rcStrict == VINF_SUCCESS
6168 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6169
6170 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6171 if (rcStrict == VINF_IEM_RAISED_XCPT)
6172 {
6173 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6174 rcStrict = VINF_SUCCESS;
6175 }
6176
6177 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6178 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6179 return rcStrict;
6180}
6181
6182
6183/**
6184 * VM-exit helper for CLTS.
6185 */
6186static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6187{
6188 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6189 AssertRCReturn(rc, rc);
6190
6191 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6192 AssertMsg( rcStrict == VINF_SUCCESS
6193 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6194
6195 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6196 if (rcStrict == VINF_IEM_RAISED_XCPT)
6197 {
6198 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6199 rcStrict = VINF_SUCCESS;
6200 }
6201
6202 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6203 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6204 return rcStrict;
6205}
6206
6207
6208/**
6209 * VM-exit helper for MOV from CRx (CRx read).
6210 */
6211static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6212{
6213 Assert(iCrReg < 16);
6214 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6215
6216 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6217 AssertRCReturn(rc, rc);
6218
6219 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6220 AssertMsg( rcStrict == VINF_SUCCESS
6221 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6222
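    /* If the MOV targeted RSP, IEM has modified the guest RSP, so it must be flagged for re-export too. */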
6223 if (iGReg == X86_GREG_xSP)
6224 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6225 else
6226 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6227#ifdef VBOX_WITH_STATISTICS
6228 switch (iCrReg)
6229 {
6230 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6231 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6232 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6233 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6234 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6235 }
6236#endif
6237 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6238 return rcStrict;
6239}
6240
6241
6242/**
6243 * VM-exit helper for MOV to CRx (CRx write).
6244 */
6245static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6246{
6247 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6248
6249 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6250 AssertMsg( rcStrict == VINF_SUCCESS
6251 || rcStrict == VINF_IEM_RAISED_XCPT
6252 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6253
6254 switch (iCrReg)
6255 {
6256 case 0:
6257 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6258 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6259 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6260 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6261 break;
6262
6263 case 2:
6264 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6265 /* Nothing to do here, CR2 is not part of the VMCS. */
6266 break;
6267
6268 case 3:
6269 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6270 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6271 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6272 break;
6273
6274 case 4:
6275 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6276 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6277#ifndef IN_NEM_DARWIN
6278 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6279 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6280#else
6281 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6282#endif
6283 break;
6284
6285 case 8:
6286 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6287 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6288 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6289 break;
6290
6291 default:
6292 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6293 break;
6294 }
6295
6296 if (rcStrict == VINF_IEM_RAISED_XCPT)
6297 {
6298 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6299 rcStrict = VINF_SUCCESS;
6300 }
6301 return rcStrict;
6302}
6303
6304
6305/**
6306 * VM-exit exception handler for \#PF (Page-fault exception).
6307 *
6308 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6309 */
6310static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6311{
6312 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6313 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6314
6315#ifndef IN_NEM_DARWIN
6316 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6317 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6318 { /* likely */ }
6319 else
6320#endif
6321 {
6322#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6323 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6324#endif
6325 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6326 if (!pVmxTransient->fVectoringDoublePF)
6327 {
6328 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6329 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6330 }
6331 else
6332 {
6333 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6334 Assert(!pVmxTransient->fIsNestedGuest);
6335 vmxHCSetPendingXcptDF(pVCpu);
6336 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6337 }
6338 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6339 return VINF_SUCCESS;
6340 }
6341
6342 Assert(!pVmxTransient->fIsNestedGuest);
6343
6344 /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
6345 of differentiating between instruction emulation and event injection as the cause of a #PF. See @bugref{6607}. */
6346 if (pVmxTransient->fVectoringPF)
6347 {
6348 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6349 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6350 }
6351
6352 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6353 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6354 AssertRCReturn(rc, rc);
6355
6356 Log4Func(("#PF: cs:rip=%#04x:%#RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pCtx->cs.Sel, pCtx->rip,
6357 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pCtx->cr3));
6358
6359 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6360 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
6361
6362 Log4Func(("#PF: rc=%Rrc\n", rc));
6363 if (rc == VINF_SUCCESS)
6364 {
6365 /*
6366 * This is typically a shadow page table sync or an MMIO instruction. But we may have
6367 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6368 */
6369 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6370 TRPMResetTrap(pVCpu);
6371 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6372 return rc;
6373 }
6374
6375 if (rc == VINF_EM_RAW_GUEST_TRAP)
6376 {
6377 if (!pVmxTransient->fVectoringDoublePF)
6378 {
6379 /* It's a guest page fault and needs to be reflected to the guest. */
6380 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6381 TRPMResetTrap(pVCpu);
6382 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6383 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6384 uGstErrorCode, pVmxTransient->uExitQual);
6385 }
6386 else
6387 {
6388 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6389 TRPMResetTrap(pVCpu);
6390 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6391 vmxHCSetPendingXcptDF(pVCpu);
6392 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6393 }
6394
6395 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6396 return VINF_SUCCESS;
6397 }
6398
6399 TRPMResetTrap(pVCpu);
6400 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6401 return rc;
6402}
6403
6404
6405/**
6406 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6407 *
6408 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6409 */
6410static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6411{
6412 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6413 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6414
6415 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0);
6416 AssertRCReturn(rc, rc);
6417
6418 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6419 {
6420 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
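        /* With CR0.NE clear the guest expects FPU errors via the legacy external path
           (FERR# asserting IRQ 13 through the PIC) rather than as a #MF exception. */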
6421 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6422
6423 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6424 * provides VM-exit instruction length. If this causes problems later,
6425 * disassemble the instruction like it's done on AMD-V. */
6426 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6427 AssertRCReturn(rc2, rc2);
6428 return rc;
6429 }
6430
6431 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6432 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6433 return VINF_SUCCESS;
6434}
6435
6436
6437/**
6438 * VM-exit exception handler for \#BP (Breakpoint exception).
6439 *
6440 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6441 */
6442static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6443{
6444 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6445 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6446
6447 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6448 AssertRCReturn(rc, rc);
6449
6450 VBOXSTRICTRC rcStrict;
6451 if (!pVmxTransient->fIsNestedGuest)
6452 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx));
6453 else
6454 rcStrict = VINF_EM_RAW_GUEST_TRAP;
6455
6456 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6457 {
6458 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6459 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6460 rcStrict = VINF_SUCCESS;
6461 }
6462
6463 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
6464 return rcStrict;
6465}
6466
6467
6468/**
6469 * VM-exit exception handler for \#AC (Alignment-check exception).
6470 *
6471 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6472 */
6473static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6474{
6475 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6476
6477 /*
6478 * Detect #ACs caused by the host having enabled split-lock detection.
6479 * Emulate such instructions.
6480 */
6481 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo,
6482 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);
6483 AssertRCReturn(rc, rc);
6484 /** @todo detect split lock in cpu feature? */
6485 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
6486 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6487 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
6488 || CPUMGetGuestCPL(pVCpu) != 3
6489 /* 3. When EFLAGS.AC is not set, this can only be a split-lock case. */
6490 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
6491 {
6492 /*
6493 * Check for debug/trace events and import state accordingly.
6494 */
6495 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
6496 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6497 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
6498#ifndef IN_NEM_DARWIN
6499 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
6500#endif
6501 )
6502 {
6503 if (pVM->cCpus == 1)
6504 {
6505#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6506 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6507#else
6508 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6509#endif
6510 AssertRCReturn(rc, rc);
6511 }
6512 }
6513 else
6514 {
6515 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6516 AssertRCReturn(rc, rc);
6517
6518 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
6519
6520 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
6521 {
6522 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
6523 if (rcStrict != VINF_SUCCESS)
6524 return rcStrict;
6525 }
6526 }
6527
6528 /*
6529 * Emulate the instruction.
6530 *
6531 * We have to ignore the LOCK prefix here as we must not retrigger the
6532 * detection on the host. This isn't all that satisfactory, though...
6533 */
6534 if (pVM->cCpus == 1)
6535 {
6536 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
6537 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6538
6539 /** @todo For SMP configs we should do a rendezvous here. */
6540 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
6541 if (rcStrict == VINF_SUCCESS)
6542#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6543 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6544 HM_CHANGED_GUEST_RIP
6545 | HM_CHANGED_GUEST_RFLAGS
6546 | HM_CHANGED_GUEST_GPRS_MASK
6547 | HM_CHANGED_GUEST_CS
6548 | HM_CHANGED_GUEST_SS);
6549#else
6550 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6551#endif
6552 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6553 {
6554 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6555 rcStrict = VINF_SUCCESS;
6556 }
6557 return rcStrict;
6558 }
6559 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
6560 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6561 return VINF_EM_EMULATE_SPLIT_LOCK;
6562 }
6563
6564 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
6565 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
6566 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
6567
6568 /* Re-inject it. We'll detect any nesting before getting here. */
6569 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6570 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6571 return VINF_SUCCESS;
6572}
6573
6574
6575/**
6576 * VM-exit exception handler for \#DB (Debug exception).
6577 *
6578 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6579 */
6580static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6581{
6582 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6583 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
6584
6585 /*
6586 * Get the DR6-like values from the Exit qualification and pass them to DBGF for processing.
6587 */
6588 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6589
6590 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
6591 uint64_t const uDR6 = X86_DR6_INIT_VAL
6592 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
6593 | X86_DR6_BD | X86_DR6_BS));
6594
6595 int rc;
6596 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6597 if (!pVmxTransient->fIsNestedGuest)
6598 {
6599 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6600
6601 /*
6602 * Prevents stepping twice over the same instruction when the guest is stepping using
6603 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
6604 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
6605 */
6606 if ( rc == VINF_EM_DBG_STEPPED
6607 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
6608 {
6609 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6610 rc = VINF_EM_RAW_GUEST_TRAP;
6611 }
6612 }
6613 else
6614 rc = VINF_EM_RAW_GUEST_TRAP;
6615 Log6Func(("rc=%Rrc\n", rc));
6616 if (rc == VINF_EM_RAW_GUEST_TRAP)
6617 {
6618 /*
6619 * The exception was for the guest. Update DR6, DR7.GD and
6620 * IA32_DEBUGCTL.LBR before forwarding it.
6621 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
6622 */
6623#ifndef IN_NEM_DARWIN
6624 VMMRZCallRing3Disable(pVCpu);
6625 HM_DISABLE_PREEMPT(pVCpu);
6626
6627 pCtx->dr[6] &= ~X86_DR6_B_MASK;
6628 pCtx->dr[6] |= uDR6;
6629 if (CPUMIsGuestDebugStateActive(pVCpu))
6630 ASMSetDR6(pCtx->dr[6]);
6631
6632 HM_RESTORE_PREEMPT();
6633 VMMRZCallRing3Enable(pVCpu);
6634#else
6635 /** @todo */
6636#endif
6637
6638 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7);
6639 AssertRCReturn(rc, rc);
6640
6641 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
6642 pCtx->dr[7] &= ~(uint64_t)X86_DR7_GD;
6643
6644 /* Paranoia. */
6645 pCtx->dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
6646 pCtx->dr[7] |= X86_DR7_RA1_MASK;
6647
6648 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
6649 AssertRC(rc);
6650
6651 /*
6652 * Raise #DB in the guest.
6653 *
6654 * It is important to reflect exactly what the VM-exit gave us (preserving the
6655 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
6656 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
6657 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
6658 *
6659 * Intel re-documented ICEBP/INT1 in May 2018; it was previously documented as part of
6660 * the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
6661 */
6662 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6663 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6664 return VINF_SUCCESS;
6665 }
6666
6667 /*
6668 * Not a guest trap, must be a hypervisor related debug event then.
6669 * Update DR6 in case someone is interested in it.
6670 */
6671 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
6672 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
6673 CPUMSetHyperDR6(pVCpu, uDR6);
6674
6675 return rc;
6676}
6677
6678
6679/**
6680 * Hacks its way around the lovely mesa driver's backdoor accesses.
6681 *
6682 * @sa hmR0SvmHandleMesaDrvGp.
6683 */
6684static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6685{
6686 LogFunc(("cs:rip=%#04x:%#RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
6687 RT_NOREF(pCtx);
6688
6689 /* For now we'll just skip the instruction. */
6690 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6691}
6692
6693
6694/**
6695 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
6696 * backdoor logging w/o checking what it is running inside.
6697 *
6698 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
6699 * backdoor port and magic numbers loaded in registers.
6700 *
6701 * @returns true if it is, false if it isn't.
6702 * @sa hmR0SvmIsMesaDrvGp.
6703 */
6704DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6705{
6706 /* 0xed: IN eAX,dx */
6707 uint8_t abInstr[1];
6708 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
6709 return false;
6710
6711 /* Check that it is #GP(0). */
6712 if (pVmxTransient->uExitIntErrorCode != 0)
6713 return false;
6714
6715 /* Check magic and port. */
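    /* 0x564d5868 is the ASCII magic 'VMXh' and 0x5658 ('VX') is the I/O port of the VMware
       backdoor interface which the Mesa driver uses for its backdoor logging. */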
6716 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
6717 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
6718 if (pCtx->rax != UINT32_C(0x564d5868))
6719 return false;
6720 if (pCtx->dx != UINT32_C(0x5658))
6721 return false;
6722
6723 /* Flat ring-3 CS. */
6724 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
6725 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
6726 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
6727 if (pCtx->cs.Attr.n.u2Dpl != 3)
6728 return false;
6729 if (pCtx->cs.u64Base != 0)
6730 return false;
6731
6732 /* Check opcode. */
6733 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
6734 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
6735 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
6736 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
6737 if (RT_FAILURE(rc))
6738 return false;
6739 if (abInstr[0] != 0xed)
6740 return false;
6741
6742 return true;
6743}
6744
6745
6746/**
6747 * VM-exit exception handler for \#GP (General-protection exception).
6748 *
6749 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6750 */
6751static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6752{
6753 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6754 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
6755
6756 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6757 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6758#ifndef IN_NEM_DARWIN
6759 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
6760 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
6761 { /* likely */ }
6762 else
6763#endif
6764 {
6765#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6766# ifndef IN_NEM_DARWIN
6767 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6768# else
6769 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6770# endif
6771#endif
6772 /*
6773 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
6774 * executing a nested-guest, reflect #GP to the guest or nested-guest.
6775 */
6776 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6777 AssertRCReturn(rc, rc);
6778 Log4Func(("Gst: cs:rip=%#04x:%#RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
6779 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
6780
6781 if ( pVmxTransient->fIsNestedGuest
6782 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
6783 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
6784 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6785 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6786 else
6787 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
6788 return rc;
6789 }
6790
6791#ifndef IN_NEM_DARWIN
6792 Assert(CPUMIsGuestInRealModeEx(pCtx));
6793 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
6794 Assert(!pVmxTransient->fIsNestedGuest);
6795
6796 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6797 AssertRCReturn(rc, rc);
6798
6799 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
6800 if (rcStrict == VINF_SUCCESS)
6801 {
6802 if (!CPUMIsGuestInRealModeEx(pCtx))
6803 {
6804 /*
6805 * The guest is no longer in real-mode, check if we can continue executing the
6806 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
6807 */
6808 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
6809 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
6810 {
6811 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
6812 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6813 }
6814 else
6815 {
6816 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
6817 rcStrict = VINF_EM_RESCHEDULE;
6818 }
6819 }
6820 else
6821 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6822 }
6823 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6824 {
6825 rcStrict = VINF_SUCCESS;
6826 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6827 }
6828 return VBOXSTRICTRC_VAL(rcStrict);
6829#endif
6830}
6831
6832
6833/**
6834 * VM-exit exception handler wrapper for all other exceptions that are not handled
6835 * by a specific handler.
6836 *
6837 * This simply re-injects the exception back into the VM without any special
6838 * processing.
6839 *
6840 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6841 */
6842static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6843{
6844 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6845
6846#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6847# ifndef IN_NEM_DARWIN
6848 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6849 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
6850 ("uVector=%#x u32XcptBitmap=%#X32\n",
6851 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
6852 NOREF(pVmcsInfo);
6853# endif
6854#endif
6855
6856 /*
6857 * Re-inject the exception into the guest. This cannot be a double-fault condition which
6858 * would have been handled while checking exits due to event delivery.
6859 */
6860 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6861
6862#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6863 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6864 AssertRCReturn(rc, rc);
6865 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6866#endif
6867
6868#ifdef VBOX_WITH_STATISTICS
6869 switch (uVector)
6870 {
6871 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
6872 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
6873 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
6874 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6875 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
6876 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
6877 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6878 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
6879 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
6880 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
6881 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
6882 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
6883 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
6884 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
6885 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
6886 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
6887 default:
6888 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
6889 break;
6890 }
6891#endif
6892
6893 /* We should never call this function for a page-fault, we'd need to pass on the fault address below otherwise. */
6894 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
6895 NOREF(uVector);
6896
6897 /* Re-inject the original exception into the guest. */
6898 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6899 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6900 return VINF_SUCCESS;
6901}
6902
6903
6904/**
6905 * VM-exit exception handler for all exceptions (except NMIs!).
6906 *
6907 * @remarks This may be called for both guests and nested-guests. Take care to not
6908 * make assumptions and avoid doing anything that is not relevant when
6909 * executing a nested-guest (e.g., Mesa driver hacks).
6910 */
6911static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6912{
6913 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6914
6915 /*
6916 * If this VM-exit occurred while delivering an event through the guest IDT, take
6917 * action based on the return code and additional hints (e.g. for page-faults)
6918 * that will be updated in the VMX transient structure.
6919 */
6920 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
6921 if (rcStrict == VINF_SUCCESS)
6922 {
6923 /*
6924 * If an exception caused a VM-exit due to delivery of an event, the original
6925 * event may have to be re-injected into the guest. We shall reinject it and
6926 * continue guest execution. However, page-fault is a complicated case and
6927 * needs additional processing done in vmxHCExitXcptPF().
6928 */
6929 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
6930 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6931 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
6932 || uVector == X86_XCPT_PF)
6933 {
6934 switch (uVector)
6935 {
6936 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
6937 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
6938 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
6939 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
6940 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
6941 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
6942 default:
6943 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
6944 }
6945 }
6946 /* else: inject pending event before resuming guest execution. */
6947 }
6948 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
6949 {
6950 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6951 rcStrict = VINF_SUCCESS;
6952 }
6953
6954 return rcStrict;
6955}
6956/** @} */
6957
6958
6959/** @name VM-exit handlers.
6960 * @{
6961 */
6962/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6963/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6964/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6965
6966/**
6967 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
6968 */
6969HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6970{
6971 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6972 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
6973
6974#ifndef IN_NEM_DARWIN
6975 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
6976 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
6977 return VINF_SUCCESS;
6978 return VINF_EM_RAW_INTERRUPT;
6979#else
6980 return VINF_SUCCESS;
6981#endif
6982}
6983
6984
6985/**
6986 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
6987 * VM-exit.
6988 */
6989HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6990{
6991 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6992 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
6993
6994 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
6995
6996 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
6997 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6998 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
6999
7000 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7001 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7002 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7003 NOREF(pVmcsInfo);
7004
7005 VBOXSTRICTRC rcStrict;
7006 switch (uExitIntType)
7007 {
7008#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7009 /*
7010 * Host physical NMIs:
7011 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7012 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7013 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7014 *
7015 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7016 * See Intel spec. 27.5.5 "Updating Non-Register State".
7017 */
7018 case VMX_EXIT_INT_INFO_TYPE_NMI:
7019 {
7020 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7021 break;
7022 }
7023#endif
7024
7025 /*
7026 * Privileged software exceptions (#DB from ICEBP),
7027 * Software exceptions (#BP and #OF),
7028 * Hardware exceptions:
7029 * Process the required exceptions and resume guest execution if possible.
7030 */
7031 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7032 Assert(uVector == X86_XCPT_DB);
7033 RT_FALL_THRU();
7034 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7035 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7036 RT_FALL_THRU();
7037 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7038 {
7039 NOREF(uVector);
7040 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
7041 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7042 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
7043 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
7044
7045 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7046 break;
7047 }
7048
7049 default:
7050 {
7051 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7052 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7053 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7054 break;
7055 }
7056 }
7057
7058 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7059 return rcStrict;
7060}
7061
7062
7063/**
7064 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7065 */
7066HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7067{
7068 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7069
7070 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7071 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7072 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7073
7074 /* Evaluate and deliver pending events and resume guest execution. */
7075 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7076 return VINF_SUCCESS;
7077}
7078
7079
7080/**
7081 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7082 */
7083HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7084{
7085 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7086
7087 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7088 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7089 {
7090 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7091 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7092 }
7093
7094 Assert(!CPUMIsGuestNmiBlocking(pVCpu));
7095
7096 /*
7097 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7098 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7099 */
7100 uint32_t fIntrState;
7101 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7102 AssertRC(rc);
7103 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7104 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7105 {
7106 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
7107 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
7108
7109 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7110 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7111 AssertRC(rc);
7112 }
7113
7114 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7115 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7116
7117 /* Evaluate and deliver pending events and resume guest execution. */
7118 return VINF_SUCCESS;
7119}
7120
7121
7122/**
7123 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7124 */
7125HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7126{
7127 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7128 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7129}
7130
7131
7132/**
7133 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7134 */
7135HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7136{
7137 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7138 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7139}
7140
7141
7142/**
7143 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7144 */
7145HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7146{
7147 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7148
7149 /*
7150 * Get the state we need and update the exit history entry.
7151 */
7152 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7153 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7154
7155 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7156 AssertRCReturn(rc, rc);
7157
7158 VBOXSTRICTRC rcStrict;
7159 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7160 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7161 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
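    /* The flattened PC (CS base + RIP) is used as the exit-history key; a non-NULL record
       presumably means this location exits frequently enough to be worth handing to
       EMHistoryExec below rather than emulating the single instruction. */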
7162 if (!pExitRec)
7163 {
7164 /*
7165 * Regular CPUID instruction execution.
7166 */
7167 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7168 if (rcStrict == VINF_SUCCESS)
7169 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7170 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7171 {
7172 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7173 rcStrict = VINF_SUCCESS;
7174 }
7175 }
7176 else
7177 {
7178 /*
7179 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7180 */
7181 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7182 AssertRCReturn(rc2, rc2);
7183
7184 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7185 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7186
7187 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7188 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7189
7190 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7191 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7192 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7193 }
7194 return rcStrict;
7195}
7196
7197
7198/**
7199 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7200 */
7201HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7202{
7203 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7204
7205 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7206 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4);
7207 AssertRCReturn(rc, rc);
7208
7209 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7210 return VINF_EM_RAW_EMULATE_INSTR;
7211
7212 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7213 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7214}
7215
7216
7217/**
7218 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7219 */
7220HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7221{
7222 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7223
7224 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7225 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7226 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
7227 AssertRCReturn(rc, rc);
7228
7229 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7230 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7231 {
7232 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7233 we must reset offsetting on VM-entry. See @bugref{6634}. */
7234 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7235 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7236 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7237 }
7238 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7239 {
7240 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7241 rcStrict = VINF_SUCCESS;
7242 }
7243 return rcStrict;
7244}
7245
7246
7247/**
7248 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7249 */
7250HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7251{
7252 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7253
7254 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7255 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7256 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
7257 AssertRCReturn(rc, rc);
7258
7259 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7260 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7261 {
7262 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7263 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7264 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7265 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7266 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7267 }
7268 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7269 {
7270 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7271 rcStrict = VINF_SUCCESS;
7272 }
7273 return rcStrict;
7274}
7275
7276
7277/**
7278 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7279 */
7280HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7281{
7282 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7283
7284 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7285 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0
7286 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
7287 AssertRCReturn(rc, rc);
7288
7289 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7290 rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
7291 if (RT_LIKELY(rc == VINF_SUCCESS))
7292 {
7293 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7294 Assert(pVmxTransient->cbExitInstr == 2);
7295 }
7296 else
7297 {
7298 AssertMsgFailed(("vmxHCExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
7299 rc = VERR_EM_INTERPRETER;
7300 }
7301 return rc;
7302}
7303
7304
7305/**
7306 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7307 */
7308HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7309{
7310 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7311
7312 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7313 if (EMAreHypercallInstructionsEnabled(pVCpu))
7314 {
7315 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7316 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
7317 | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
7318 AssertRCReturn(rc, rc);
7319
7320 /* Perform the hypercall. */
7321 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7322 if (rcStrict == VINF_SUCCESS)
7323 {
7324 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7325 AssertRCReturn(rc, rc);
7326 }
7327 else
7328 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7329 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7330 || RT_FAILURE(rcStrict));
7331
7332 /* If the hypercall changes anything other than guest's general-purpose registers,
7333 we would need to reload the guest changed bits here before VM-entry. */
7334 }
7335 else
7336 Log4Func(("Hypercalls not enabled\n"));
7337
7338 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7339 if (RT_FAILURE(rcStrict))
7340 {
7341 vmxHCSetPendingXcptUD(pVCpu);
7342 rcStrict = VINF_SUCCESS;
7343 }
7344
7345 return rcStrict;
7346}
7347
7348
7349/**
7350 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7351 */
7352HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7353{
7354 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7355#ifndef IN_NEM_DARWIN
7356 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7357#endif
7358
7359 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7360 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7361 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7362 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7363 AssertRCReturn(rc, rc);
7364
7365 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7366
7367 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7368 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7369 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7370 {
7371 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7372 rcStrict = VINF_SUCCESS;
7373 }
7374 else
7375 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7376 VBOXSTRICTRC_VAL(rcStrict)));
7377 return rcStrict;
7378}
7379
7380
7381/**
7382 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7383 */
7384HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7385{
7386 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7387
7388 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7389 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7390 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
7391 AssertRCReturn(rc, rc);
7392
7393 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7394 if (rcStrict == VINF_SUCCESS)
7395 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7396 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7397 {
7398 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7399 rcStrict = VINF_SUCCESS;
7400 }
7401
7402 return rcStrict;
7403}
7404
7405
7406/**
7407 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7408 */
7409HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7410{
7411 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7412
7413 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7414 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7415 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7416 AssertRCReturn(rc, rc);
7417
7418 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
7419 if (RT_SUCCESS(rcStrict))
7420 {
7421 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7422 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7423 rcStrict = VINF_SUCCESS;
7424 }
7425
7426 return rcStrict;
7427}
7428
7429
7430/**
7431 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7432 * VM-exit.
7433 */
7434HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7435{
7436 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7437 return VINF_EM_RESET;
7438}
7439
7440
7441/**
7442 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7443 */
7444HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7445{
7446 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7447
7448 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7449 AssertRCReturn(rc, rc);
7450
7451 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
7452 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
7453 rc = VINF_SUCCESS;
7454 else
7455 rc = VINF_EM_HALT;
7456
7457 if (rc != VINF_SUCCESS)
7458 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
7459 return rc;
7460}
7461
7462
7463/**
7464 * VM-exit handler for instructions that result in a \#UD exception delivered to
7465 * the guest.
7466 */
7467HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7468{
7469 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7470 vmxHCSetPendingXcptUD(pVCpu);
7471 return VINF_SUCCESS;
7472}
7473
7474
7475/**
7476 * VM-exit handler for expiry of the VMX-preemption timer.
7477 */
7478HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7479{
7480 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7481
7482 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
7483 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7484 Log12(("vmxHCExitPreemptTimer:\n"));
7485
7486 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
7487 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7488 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
7489 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
7490 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
7491}
7492
7493
7494/**
7495 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
7496 */
7497HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7498{
7499 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7500
7501 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7502 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7503 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
7504 AssertRCReturn(rc, rc);
7505
7506 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
7507 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
7508 : HM_CHANGED_RAISED_XCPT_MASK);
7509
7510#ifndef IN_NEM_DARWIN
7511 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
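    /* Re-evaluate whether the guest and host XCR0 values differ now that XSETBV may have
       changed XCR0; if the answer changed, the VM-entry/exit path must be updated to
       (not) swap XCR0 accordingly. */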
7512 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
7513 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
7514 {
7515 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
7516 hmR0VmxUpdateStartVmFunction(pVCpu);
7517 }
7518#endif
7519
7520 return rcStrict;
7521}
7522
7523
7524/**
7525 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
7526 */
7527HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7528{
7529 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7530
7531 /** @todo Enable the new code after finding a reliable guest test-case. */
7532#if 1
7533 return VERR_EM_INTERPRETER;
7534#else
7535 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7536 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
7537 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7538 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
7539 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7540 AssertRCReturn(rc, rc);
7541
7542 /* Paranoia. Ensure this has a memory operand. */
7543 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
7544
7545 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
7546 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
7547 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
7548 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
7549
7550 RTGCPTR GCPtrDesc;
7551 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
7552
7553 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
7554 GCPtrDesc, uType);
7555 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7556 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7557 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7558 {
7559 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7560 rcStrict = VINF_SUCCESS;
7561 }
7562 return rcStrict;
7563#endif
7564}
7565
7566
7567/**
7568 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
7569 * VM-exit.
7570 */
7571HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7572{
7573 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7574 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7575 AssertRCReturn(rc, rc);
7576
7577 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
7578 if (RT_FAILURE(rc))
7579 return rc;
7580
7581 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
7582 NOREF(uInvalidReason);
7583
7584#ifdef VBOX_STRICT
7585 uint32_t fIntrState;
7586 uint64_t u64Val;
7587 vmxHCReadEntryIntInfoVmcs(pVCpu, pVmxTransient);
7588 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
7589 vmxHCReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
7590
7591 Log4(("uInvalidReason %u\n", uInvalidReason));
7592 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
7593 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
7594 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
7595
7596 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
7597 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
7598 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
7599 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
7600 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
7601 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
7602 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
7603 Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
7604 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
7605 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
7606 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
7607 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
7608# ifndef IN_NEM_DARWIN
7609 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
7610 {
7611 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
7612 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
7613 }
7614
7615 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
7616# endif
7617#endif
7618
7619 return VERR_VMX_INVALID_GUEST_STATE;
7620}
7621
7622/**
7623 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
7624 */
7625HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7626{
7627 /*
7628 * Cumulative notes of all recognized but unexpected VM-exits.
7629 *
7630 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
7631 * nested-paging is used.
7632 *
7633 * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
7634 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
7635 * this function (and thereby stopping VM execution) for handling such instructions.
7636 *
7637 *
7638 * VMX_EXIT_INIT_SIGNAL:
7639 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
7640 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these
7641 * VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
7642 *
7643 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
7644 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
7645 * See Intel spec. 23.8 "Restrictions on VMX operation".
7646 *
7647 * VMX_EXIT_SIPI:
7648 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
7649 * activity state is used. We don't make use of it as our guests don't have direct
7650 * access to the host local APIC.
7651 *
7652 * See Intel spec. 25.3 "Other Causes of VM-exits".
7653 *
7654 * VMX_EXIT_IO_SMI:
7655 * VMX_EXIT_SMI:
7656 * This can only happen if we support dual-monitor treatment of SMI, which can be
7657 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
7658 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
7659 * VMX root mode or receive an SMI. If we get here, something funny is going on.
7660 *
7661 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
7662 * See Intel spec. 25.3 "Other Causes of VM-Exits"
7663 *
7664 * VMX_EXIT_ERR_MSR_LOAD:
7665 * Failures while loading MSRs that are part of the VM-entry MSR-load area are
7666 * unexpected and typically indicate a bug in the hypervisor code. We thus cannot
7667 * resume execution.
7668 *
7669 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
7670 *
7671 * VMX_EXIT_ERR_MACHINE_CHECK:
7672 * Machine-check exceptions indicate a fatal/unrecoverable hardware condition,
7673 * including but not limited to system bus, ECC, parity, cache and TLB errors. An
7674 * abort-class #MC exception is raised. We thus cannot assume a reasonable chance
7675 * of continuing any sort of execution and we bail.
7676 *
7677 * See Intel spec. 15.1 "Machine-check Architecture".
7678 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
7679 *
7680 * VMX_EXIT_PML_FULL:
7681 * VMX_EXIT_VIRTUALIZED_EOI:
7682 * VMX_EXIT_APIC_WRITE:
7683 * We do not currently support any of these features and thus they are all unexpected
7684 * VM-exits.
7685 *
7686 * VMX_EXIT_GDTR_IDTR_ACCESS:
7687 * VMX_EXIT_LDTR_TR_ACCESS:
7688 * VMX_EXIT_RDRAND:
7689 * VMX_EXIT_RSM:
7690 * VMX_EXIT_VMFUNC:
7691 * VMX_EXIT_ENCLS:
7692 * VMX_EXIT_RDSEED:
7693 * VMX_EXIT_XSAVES:
7694 * VMX_EXIT_XRSTORS:
7695 * VMX_EXIT_UMWAIT:
7696 * VMX_EXIT_TPAUSE:
7697 * VMX_EXIT_LOADIWKEY:
7698 * These VM-exits are -not- caused unconditionally by execution of the corresponding
7699 * instruction. Any VM-exit for these instructions indicates a hardware problem,
7700 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
7701 *
7702 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
7703 */
7704 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7705 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
7706 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7707}
7708
7709
7710/**
7711 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
7712 */
7713HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7714{
7715 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7716
7717 /** @todo Optimize this: We currently drag in the whole MSR state
7718 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get the
7719 * MSRs required. That would require changes to IEM and possibly CPUM too.
7720 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7721 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7722 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7723 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
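    /* The FS and GS base MSRs are not part of CPUMCTX_EXTRN_ALL_MSRS, so import the whole
       segment register for those (same reasoning as in the WRMSR handler below). */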
7724 switch (idMsr)
7725 {
7726 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7727 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7728 }
7729
7730 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7731 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7732 AssertRCReturn(rc, rc);
7733
7734 Log4Func(("ecx=%#RX32\n", idMsr));
7735
7736#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7737 Assert(!pVmxTransient->fIsNestedGuest);
7738 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
7739 {
7740 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
7741 && idMsr != MSR_K6_EFER)
7742 {
7743 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
7744 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7745 }
7746 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7747 {
7748 Assert(pVmcsInfo->pvMsrBitmap);
7749 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7750 if (fMsrpm & VMXMSRPM_ALLOW_RD)
7751 {
7752 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
7753 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7754 }
7755 }
7756 }
7757#endif
7758
7759 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
7760 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
7761 if (rcStrict == VINF_SUCCESS)
7762 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7763 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7764 {
7765 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7766 rcStrict = VINF_SUCCESS;
7767 }
7768 else
7769 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
7770 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7771
7772 return rcStrict;
7773}
7774
7775
7776/**
7777 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
7778 */
7779HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7780{
7781 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7782
7783 /** @todo Optimize this: We currently drag in the whole MSR state
7784 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get the
7785 * MSRs required. That would require changes to IEM and possibly CPUM too.
7786 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7787 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7788 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
7789
7790 /*
7791 * The FS and GS base MSRs are not part of the above all-MSRs mask.
7792 * Although we don't need to fetch the base (it will be overwritten shortly), when
7793 * loading the guest state we also load the entire segment register, including its
7794 * limit and attributes, and thus we need to import them here.
7795 */
7796 switch (idMsr)
7797 {
7798 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7799 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7800 }
7801
7802 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7803 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7804 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7805 AssertRCReturn(rc, rc);
7806
7807 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
7808
7809 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
7810 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
7811
7812 if (rcStrict == VINF_SUCCESS)
7813 {
7814 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7815
7816 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
7817 if ( idMsr == MSR_IA32_APICBASE
7818 || ( idMsr >= MSR_IA32_X2APIC_START
7819 && idMsr <= MSR_IA32_X2APIC_END))
7820 {
7821 /*
7822 * We've already saved the APIC related guest-state (TPR) in post-run phase.
7823 * When full APIC register virtualization is implemented we'll have to make
7824 * sure APIC state is saved from the VMCS before IEM changes it.
7825 */
7826 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7827 }
7828 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
7829 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7830 else if (idMsr == MSR_K6_EFER)
7831 {
7832 /*
7833 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
7834 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
7835 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
7836 */
7837 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
7838 }
7839
7840 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
7841 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
7842 {
7843 switch (idMsr)
7844 {
7845 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
7846 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
7847 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
7848 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
7849 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
7850 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
7851 default:
7852 {
7853#ifndef IN_NEM_DARWIN
7854 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7855 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
7856 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7857 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
7858#else
7859 AssertMsgFailed(("TODO\n"));
7860#endif
7861 break;
7862 }
7863 }
7864 }
7865#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7866 else
7867 {
7868 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
7869 switch (idMsr)
7870 {
7871 case MSR_IA32_SYSENTER_CS:
7872 case MSR_IA32_SYSENTER_EIP:
7873 case MSR_IA32_SYSENTER_ESP:
7874 case MSR_K8_FS_BASE:
7875 case MSR_K8_GS_BASE:
7876 {
7877 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
7878 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7879 }
7880
7881 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
7882 default:
7883 {
7884 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7885 {
7886 /* EFER MSR writes are always intercepted. */
7887 if (idMsr != MSR_K6_EFER)
7888 {
7889 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
7890 idMsr));
7891 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7892 }
7893 }
7894
7895 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7896 {
7897 Assert(pVmcsInfo->pvMsrBitmap);
7898 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7899 if (fMsrpm & VMXMSRPM_ALLOW_WR)
7900 {
7901 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
7902 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7903 }
7904 }
7905 break;
7906 }
7907 }
7908 }
7909#endif /* VBOX_STRICT */
7910 }
7911 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7912 {
7913 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7914 rcStrict = VINF_SUCCESS;
7915 }
7916 else
7917 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
7918 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7919
7920 return rcStrict;
7921}
7922
7923
7924/**
7925 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
7926 */
7927HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7928{
7929 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7930
7931 /** @todo The guest has likely hit a contended spinlock. We might want to
7932 * poke or schedule a different guest VCPU. */
7933 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7934 if (RT_SUCCESS(rc))
7935 return VINF_EM_RAW_INTERRUPT;
7936
7937 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
7938 return rc;
7939}
7940
7941
7942/**
7943 * VM-exit handler for when the TPR value is lowered below the specified
7944 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
7945 */
7946HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7947{
7948 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7949 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
7950
7951 /*
7952 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
7953 * We'll re-evaluate pending interrupts and inject them before the next VM
7954 * entry so we can just continue execution here.
7955 */
7956 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
7957 return VINF_SUCCESS;
7958}
7959
7960
7961/**
7962 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
7963 * VM-exit.
7964 *
7965 * @retval VINF_SUCCESS when guest execution can continue.
7966 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
7967 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
7968 * incompatible guest state for VMX execution (real-on-v86 case).
7969 */
7970HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7971{
7972 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7973 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
7974
7975 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7976 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7977 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7978
7979 VBOXSTRICTRC rcStrict;
7980 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7981 uint64_t const uExitQual = pVmxTransient->uExitQual;
7982 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
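    /* Rough layout of the exit qualification for CR accesses (see Intel spec. table 27-3):
       bits 3:0 = control register number, bits 5:4 = access type (MOV to/from CR, CLTS,
       LMSW), bits 11:8 = general-purpose register, bits 31:16 = LMSW source data. */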
7983 switch (uAccessType)
7984 {
7985 /*
7986 * MOV to CRx.
7987 */
7988 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
7989 {
7990 /*
7991 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
7992 * changes certain bits in CR0 or CR4 (and not just CR3). We are currently fine
7993 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
7994 * PAE PDPTEs as well.
7995 */
7996 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
7997 AssertRCReturn(rc, rc);
7998
7999 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8000#ifndef IN_NEM_DARWIN
8001 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8002#endif
8003 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8004 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8005
8006 /*
8007 * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8008 * - When nested paging isn't used.
8009 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8010 * - We are executing in the VM debug loop.
8011 */
8012#ifndef IN_NEM_DARWIN
8013 Assert( iCrReg != 3
8014 || !VM_IS_VMX_NESTED_PAGING(pVM)
8015 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8016 || pVCpu->hmr0.s.fUsingDebugLoop);
8017#else
8018 Assert( iCrReg != 3
8019 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8020#endif
8021
8022 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8023 Assert( iCrReg != 8
8024 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8025
8026 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8027 AssertMsg( rcStrict == VINF_SUCCESS
8028 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8029
8030#ifndef IN_NEM_DARWIN
8031 /*
8032 * This is a kludge for handling switches back to real mode when we try to use
8033 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8034 * deal with special selector values, so we have to return to ring-3 and run
8035 * there till the selector values are V86 mode compatible.
8036 *
8037 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8038 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8039 * this function.
8040 */
8041 if ( iCrReg == 0
8042 && rcStrict == VINF_SUCCESS
8043 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8044 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8045 && (uOldCr0 & X86_CR0_PE)
8046 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8047 {
8048 /** @todo Check selectors rather than returning all the time. */
8049 Assert(!pVmxTransient->fIsNestedGuest);
8050 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8051 rcStrict = VINF_EM_RESCHEDULE_REM;
8052 }
8053#endif
8054
8055 break;
8056 }
8057
8058 /*
8059 * MOV from CRx.
8060 */
8061 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8062 {
8063 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8064 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8065
8066 /*
8067 * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8068 * - When nested paging isn't used.
8069 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8070 * - We are executing in the VM debug loop.
8071 */
8072#ifndef IN_NEM_DARWIN
8073 Assert( iCrReg != 3
8074 || !VM_IS_VMX_NESTED_PAGING(pVM)
8075 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8076 || pVCpu->hmr0.s.fLeaveDone);
8077#else
8078 Assert( iCrReg != 3
8079 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8080#endif
8081
8082 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8083 Assert( iCrReg != 8
8084 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8085
8086 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8087 break;
8088 }
8089
8090 /*
8091 * CLTS (Clear Task-Switch Flag in CR0).
8092 */
8093 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8094 {
8095 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8096 break;
8097 }
8098
8099 /*
8100 * LMSW (Load Machine-Status Word into CR0).
8101 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8102 */
8103 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8104 {
8105 RTGCPTR GCPtrEffDst;
8106 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8107 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8108 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8109 if (fMemOperand)
8110 {
8111 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
8112 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8113 }
8114 else
8115 GCPtrEffDst = NIL_RTGCPTR;
8116 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8117 break;
8118 }
8119
8120 default:
8121 {
8122 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8123 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8124 }
8125 }
8126
8127 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8128 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8129 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8130
8131 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8132 NOREF(pVM);
8133 return rcStrict;
8134}
8135
8136
8137/**
8138 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8139 * VM-exit.
8140 */
8141HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8142{
8143 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8144 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8145
8146 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8147 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8148 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8149 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8150 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK
8151 | CPUMCTX_EXTRN_EFER);
8152 /* EFER MSR also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8153 AssertRCReturn(rc, rc);
8154
8155 /* Refer to Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
8156 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8157 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8158 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8159 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8160 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8161 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8162 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8163
8164 /*
8165 * Update exit history to see if this exit can be optimized.
8166 */
8167 VBOXSTRICTRC rcStrict;
8168 PCEMEXITREC pExitRec = NULL;
8169 if ( !fGstStepping
8170 && !fDbgStepping)
8171 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8172 !fIOString
8173 ? !fIOWrite
8174 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8175 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8176 : !fIOWrite
8177 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8178 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8179 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8180 if (!pExitRec)
8181 {
8182 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8183 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
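        /* The exit-qualification size field encodes 0=1 byte, 1=2 bytes and 3=4 bytes; the
           value 2 is unused, hence the zero entries in the tables above (cf. the assertion
           on uIOSize earlier in this function). */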
8184
8185 uint32_t const cbValue = s_aIOSizes[uIOSize];
8186 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8187 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8188 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8189 if (fIOString)
8190 {
8191 /*
8192 * INS/OUTS - I/O String instruction.
8193 *
8194 * Use instruction-information if available, otherwise fall back on
8195 * interpreting the instruction.
8196 */
8197 Log4Func(("cs:rip=%#04x:%#RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8198 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8199 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8200 if (fInsOutsInfo)
8201 {
8202 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8203 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8204 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8205 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8206 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8207 if (fIOWrite)
8208 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8209 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8210 else
8211 {
8212 /*
8213 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8214 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8215 * See Intel Instruction spec. for "INS".
8216 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8217 */
8218 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8219 }
8220 }
8221 else
8222 rcStrict = IEMExecOne(pVCpu);
8223
8224 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8225 fUpdateRipAlready = true;
8226 }
8227 else
8228 {
8229 /*
8230 * IN/OUT - I/O instruction.
8231 */
8232 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8233 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8234 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8235 if (fIOWrite)
8236 {
8237 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8238 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8239#ifndef IN_NEM_DARWIN
8240 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8241 && !pCtx->eflags.Bits.u1TF)
8242 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8243#endif
8244 }
8245 else
8246 {
8247 uint32_t u32Result = 0;
8248 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8249 if (IOM_SUCCESS(rcStrict))
8250 {
8251 /* Save result of I/O IN instr. in AL/AX/EAX. */
8252 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8253 }
8254#ifndef IN_NEM_DARWIN
8255 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8256 && !pCtx->eflags.Bits.u1TF)
8257 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8258#endif
8259 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8260 }
8261 }
8262
8263 if (IOM_SUCCESS(rcStrict))
8264 {
8265 if (!fUpdateRipAlready)
8266 {
8267 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8268 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8269 }
8270
8271 /*
8272 * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault
8273 * guru meditation while booting a Fedora 17 64-bit guest.
8274 *
8275 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8276 */
8277 if (fIOString)
8278 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8279
8280 /*
8281 * If any I/O breakpoints are armed, we need to check if one triggered
8282 * and take appropriate action.
8283 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8284 */
8285 rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_DR7);
8286 AssertRCReturn(rc, rc);
8287
8288 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8289 * execution engines about whether hyper BPs and such are pending. */
8290 uint32_t const uDr7 = pCtx->dr[7];
8291 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8292 && X86_DR7_ANY_RW_IO(uDr7)
8293 && (pCtx->cr4 & X86_CR4_DE))
8294 || DBGFBpIsHwIoArmed(pVM)))
8295 {
8296 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8297
8298#ifndef IN_NEM_DARWIN
8299 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8300 VMMRZCallRing3Disable(pVCpu);
8301 HM_DISABLE_PREEMPT(pVCpu);
8302
8303 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8304
8305 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8306 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8307 {
8308 /* Raise #DB. */
8309 if (fIsGuestDbgActive)
8310 ASMSetDR6(pCtx->dr[6]);
8311 if (pCtx->dr[7] != uDr7)
8312 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8313
8314 vmxHCSetPendingXcptDB(pVCpu);
8315 }
8316 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8317 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8318 else if ( rcStrict2 != VINF_SUCCESS
8319 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8320 rcStrict = rcStrict2;
8321 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8322
8323 HM_RESTORE_PREEMPT();
8324 VMMRZCallRing3Enable(pVCpu);
8325#else
8326 /** @todo */
8327#endif
8328 }
8329 }
8330
8331#ifdef VBOX_STRICT
8332 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8333 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8334 Assert(!fIOWrite);
8335 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8336 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8337 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8338 Assert(fIOWrite);
8339 else
8340 {
8341# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8342 * statuses, that the VMM device and some others may return. See
8343 * IOM_SUCCESS() for guidance. */
8344 AssertMsg( RT_FAILURE(rcStrict)
8345 || rcStrict == VINF_SUCCESS
8346 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8347 || rcStrict == VINF_EM_DBG_BREAKPOINT
8348 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8349 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8350# endif
8351 }
8352#endif
8353 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8354 }
8355 else
8356 {
8357 /*
8358 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8359 */
8360 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8361 AssertRCReturn(rc2, rc2);
8362 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8363 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8364 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8365 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8366 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8367 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8368
8369 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8370 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8371
8372 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8373 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8374 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8375 }
8376 return rcStrict;
8377}
8378
8379
8380/**
8381 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8382 * VM-exit.
8383 */
8384HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8385{
8386 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8387
8388 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8389 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
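    /* The task-switch exit qualification holds the new TSS selector in bits 15:0 and the
       switch source (CALL, IRET, JMP or IDT task gate) in bits 31:30; only the IDT case
       needs the pending-event treatment below. */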
8390 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
8391 {
8392 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8393 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
8394 {
8395 uint32_t uErrCode;
8396 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
8397 {
8398 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8399 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
8400 }
8401 else
8402 uErrCode = 0;
8403
8404 RTGCUINTPTR GCPtrFaultAddress;
8405 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
8406 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
8407 else
8408 GCPtrFaultAddress = 0;
8409
8410 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8411
8412 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
8413 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
8414
8415 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
8416 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
8417 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8418 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8419 }
8420 }
8421
8422 /* Fall back to the interpreter to emulate the task-switch. */
8423 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8424 return VERR_EM_INTERPRETER;
8425}
8426
8427
8428/**
8429 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
8430 */
8431HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8432{
8433 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8434
8435 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8436 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
8437 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8438 AssertRC(rc);
8439 return VINF_EM_DBG_STEPPED;
8440}
8441
8442
8443/**
8444 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
8445 */
8446HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8447{
8448 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8449 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
8450
8451 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8452 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8453 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8454 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8455 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8456
8457 /*
8458 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8459 */
8460 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8461 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8462 {
8463 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
8464 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
8465 {
8466 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8467 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8468 }
8469 }
8470 else
8471 {
8472 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8473 return rcStrict;
8474 }
8475
8476 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
8477 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8478 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8479 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8480 AssertRCReturn(rc, rc);
8481
8482 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
8483 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
8484 switch (uAccessType)
8485 {
8486#ifndef IN_NEM_DARWIN
8487 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
8488 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
8489 {
8490 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
8491 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
8492 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
8493
8494 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
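            /* Mask off the page offset of the APIC base and add the access offset from the
               exit qualification to form the faulting guest-physical address. */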
8495 GCPhys &= PAGE_BASE_GC_MASK;
8496 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
8497 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
8498 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
8499
8500 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
8501 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
8502 Log4Func(("IOMMMIOPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8503 if ( rcStrict == VINF_SUCCESS
8504 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8505 || rcStrict == VERR_PAGE_NOT_PRESENT)
8506 {
8507 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8508 | HM_CHANGED_GUEST_APIC_TPR);
8509 rcStrict = VINF_SUCCESS;
8510 }
8511 break;
8512 }
8513#else
8514 /** @todo */
8515#endif
8516
8517 default:
8518 {
8519 Log4Func(("uAccessType=%#x\n", uAccessType));
8520 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
8521 break;
8522 }
8523 }
8524
8525 if (rcStrict != VINF_SUCCESS)
8526 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
8527 return rcStrict;
8528}
8529
8530
8531/**
8532 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
8533 * VM-exit.
8534 */
8535HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8536{
8537 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8538 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8539
8540 /*
8541 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
8542 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
8543 * must emulate the MOV DRx access.
8544 */
8545 if (!pVmxTransient->fIsNestedGuest)
8546 {
8547 /* We should -not- get this VM-exit if the guest's debug registers were active. */
8548 if (pVmxTransient->fWasGuestDebugStateActive)
8549 {
8550 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
8551 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8552 }
8553
8554 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
8555 && !pVmxTransient->fWasHyperDebugStateActive)
8556 {
8557 Assert(!DBGFIsStepping(pVCpu));
8558 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
8559
8560 /* Don't intercept MOV DRx any more. */
8561 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
8562 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8563 AssertRC(rc);
8564
8565#ifndef IN_NEM_DARWIN
8566 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
8567 VMMRZCallRing3Disable(pVCpu);
8568 HM_DISABLE_PREEMPT(pVCpu);
8569
8570 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
8571 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
8572 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8573
8574 HM_RESTORE_PREEMPT();
8575 VMMRZCallRing3Enable(pVCpu);
8576#else
8577 /** @todo */
8578#endif
8579
8580#ifdef VBOX_WITH_STATISTICS
8581 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8582 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8583 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8584 else
8585 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8586#endif
8587 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
8588 return VINF_SUCCESS;
8589 }
8590 }
8591
8592 /*
 8593 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires the EFER MSR and CS.
8594 * The EFER MSR is always up-to-date.
8595 * Update the segment registers and DR7 from the CPU.
8596 */
8597 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8598 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8599 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
8600 AssertRCReturn(rc, rc);
8601 Log4Func(("cs:rip=%#04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
8602
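 /* Let EM interpret the MOV DRx access; the exit qualification tells us the direction (read or
    write), which debug register and which general-purpose register are involved. */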
8603 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8604 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8605 {
8606 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8607 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
8608 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
8609 if (RT_SUCCESS(rc))
8610 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
8611 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8612 }
8613 else
8614 {
8615 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8616 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
8617 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
8618 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8619 }
8620
8621 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8622 if (RT_SUCCESS(rc))
8623 {
8624 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8625 AssertRCReturn(rc2, rc2);
8626 return VINF_SUCCESS;
8627 }
8628 return rc;
8629}
8630
8631
8632/**
8633 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
8634 * Conditional VM-exit.
8635 */
8636HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8637{
8638 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8639
8640#ifndef IN_NEM_DARWIN
8641 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8642
8643 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8644 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8645 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8646 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8647 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8648
8649 /*
8650 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8651 */
8652 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8653 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8654 {
8655 /*
8656 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
8657 * instruction emulation to inject the original event. Otherwise, injecting the original event
8658 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
8659 */
8660 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8661 { /* likely */ }
8662 else
8663 {
8664 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8665#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8666 /** @todo NSTVMX: Think about how this should be handled. */
8667 if (pVmxTransient->fIsNestedGuest)
8668 return VERR_VMX_IPE_3;
8669#endif
8670 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8671 }
8672 }
8673 else
8674 {
8675 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8676 return rcStrict;
8677 }
8678
8679 /*
8680 * Get sufficient state and update the exit history entry.
8681 */
8682 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8683 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8684 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8685 AssertRCReturn(rc, rc);
8686
8687 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
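 /* Consult the exit history for this guest RIP: if this instruction has been causing frequent MMIO
    exits, EMHistoryUpdateFlagsAndTypeAndPC returns an exit record and we take the EMHistoryExec
    path below instead of the one-off PGM handler. */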
8688 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8689 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
8690 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8691 if (!pExitRec)
8692 {
8693 /*
8694 * If we succeed, resume guest execution.
8695 * If we fail in interpreting the instruction because we couldn't get the guest physical address
8696 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
8697 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
8698 * weird case. See @bugref{6043}.
8699 */
8700 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8701 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8702/** @todo bird: We can probably just go straight to IOM here and assume that
8703 * it's MMIO, then fall back on PGM if that hunch didn't work out so
 8704 * well. However, we need to address the aliasing workarounds that
 8705 * PGMR0Trap0eHandlerNPMisconfig implements, so some care is needed.
8706 *
8707 * Might also be interesting to see if we can get this done more or
 8708 * less locklessly inside IOM. Need to consider the lookup-table
 8709 * updating and its use a bit more carefully first (or do all updates
 8710 * via a rendezvous). */
8711 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
8712 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
8713 if ( rcStrict == VINF_SUCCESS
8714 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8715 || rcStrict == VERR_PAGE_NOT_PRESENT)
8716 {
8717 /* Successfully handled MMIO operation. */
8718 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8719 | HM_CHANGED_GUEST_APIC_TPR);
8720 rcStrict = VINF_SUCCESS;
8721 }
8722 }
8723 else
8724 {
8725 /*
8726 * Frequent exit or something needing probing. Call EMHistoryExec.
8727 */
8728 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
8729 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
8730
8731 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8732 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8733
8734 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8735 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8736 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8737 }
8738 return rcStrict;
8739#else
8740 AssertFailed();
8741 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
8742#endif
8743}
8744
8745
8746/**
8747 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
8748 * VM-exit.
8749 */
8750HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8751{
8752 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8753#ifndef IN_NEM_DARWIN
8754 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8755
8756 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8757 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8758 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8759 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8760 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8761 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8762
8763 /*
8764 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8765 */
8766 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8767 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8768 {
8769 /*
8770 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
8771 * we shall resolve the nested #PF and re-inject the original event.
8772 */
8773 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8774 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
8775 }
8776 else
8777 {
8778 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8779 return rcStrict;
8780 }
8781
8782 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8783 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8784 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8785 AssertRCReturn(rc, rc);
8786
8787 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8788 uint64_t const uExitQual = pVmxTransient->uExitQual;
8789 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
8790
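 /* Synthesize an x86 #PF-style error code from the EPT-violation exit qualification, roughly:
        instruction fetch             -> X86_TRAP_PF_ID
        write access                  -> X86_TRAP_PF_RW
        any EPT entry permission set  -> X86_TRAP_PF_P (the translation existed, it just lacked rights)
    This is what the nested-paging #PF handler below expects. */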
8791 RTGCUINT uErrorCode = 0;
8792 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
8793 uErrorCode |= X86_TRAP_PF_ID;
8794 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8795 uErrorCode |= X86_TRAP_PF_RW;
8796 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
8797 uErrorCode |= X86_TRAP_PF_P;
8798
8799 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8800 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%#RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
8801
8802 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8803
8804 /*
 8805 * Handle the page-fault trap for the nested shadow table.
8806 */
8807 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
8808 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
8809 TRPMResetTrap(pVCpu);
8810
8811 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
8812 if ( rcStrict == VINF_SUCCESS
8813 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8814 || rcStrict == VERR_PAGE_NOT_PRESENT)
8815 {
8816 /* Successfully synced our nested page tables. */
8817 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
8818 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
8819 return VINF_SUCCESS;
8820 }
8821#else
8822 PVM pVM = pVCpu->CTX_SUFF(pVM);
8823 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
8824 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8825 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8826 vmxHCImportGuestRip(pVCpu);
8827 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
8828
8829 /*
8830 * Ask PGM for information about the given GCPhys. We need to check if we're
8831 * out of sync first.
8832 */
8833 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE), false, false };
8834 PGMPHYSNEMPAGEINFO Info;
8835 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
8836 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
8837 if (RT_SUCCESS(rc))
8838 {
8839 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8840 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
8841 {
8842 if (State.fCanResume)
8843 {
8844 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
8845 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8846 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8847 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8848 State.fDidSomething ? "" : " no-change"));
8849 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
8850 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8851 return VINF_SUCCESS;
8852 }
8853 }
8854
8855 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
8856 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8857 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8858 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8859 State.fDidSomething ? "" : " no-change"));
8860 }
8861 else
8862 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
8863 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8864 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
8865
8866 /*
8867 * Emulate the memory access, either access handler or special memory.
8868 */
8869 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
8870 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8871 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
8872 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
8873 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8874
8875 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8876 AssertRCReturn(rc, rc);
8877
8878 VBOXSTRICTRC rcStrict;
8879 if (!pExitRec)
8880 rcStrict = IEMExecOne(pVCpu);
8881 else
8882 {
8883 /* Frequent access or probing. */
8884 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8885 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8886 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8887 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8888 }
8889
8890 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8891#endif
8892
8893 Log4Func(("EPT return to ring-3 rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8894 return rcStrict;
8895}
8896
8897
8898#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8899/**
8900 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
8901 */
8902HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8903{
8904 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8905
8906 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8907 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8908 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8909 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8910 | CPUMCTX_EXTRN_HWVIRT
8911 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8912 AssertRCReturn(rc, rc);
8913
8914 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8915
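 /* Package the VM-exit information (reason, qualification, instruction info and length) and hand
    it to IEM to emulate the VMCLEAR. The VMCS operand is always a memory operand, hence the
    unconditional HMVMX_DECODE_MEM_OPERAND below. */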
8916 VMXVEXITINFO ExitInfo;
8917 RT_ZERO(ExitInfo);
8918 ExitInfo.uReason = pVmxTransient->uExitReason;
8919 ExitInfo.u64Qual = pVmxTransient->uExitQual;
8920 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
8921 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
8922 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
8923
8924 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
8925 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8926 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
8927 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8928 {
8929 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8930 rcStrict = VINF_SUCCESS;
8931 }
8932 return rcStrict;
8933}
8934
8935
8936/**
8937 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
8938 */
8939HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8940{
8941 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8942
8943 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
8944 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
8945 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8946 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8947 AssertRCReturn(rc, rc);
8948
8949 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8950
8951 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
8952 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
8953 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
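 /* If IEM emulated the VMLAUNCH successfully and the guest is now in VMX non-root mode, return
    VINF_VMX_VMLAUNCH_VMRESUME so the caller can switch over to executing the nested-guest. */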
8954 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8955 {
8956 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8957 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
8958 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
8959 }
8960 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8961 return rcStrict;
8962}
8963
8964
8965/**
8966 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
8967 */
8968HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8969{
8970 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8971
8972 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8973 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8974 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8975 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8976 | CPUMCTX_EXTRN_HWVIRT
8977 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8978 AssertRCReturn(rc, rc);
8979
8980 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8981
8982 VMXVEXITINFO ExitInfo;
8983 RT_ZERO(ExitInfo);
8984 ExitInfo.uReason = pVmxTransient->uExitReason;
8985 ExitInfo.u64Qual = pVmxTransient->uExitQual;
8986 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
8987 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
8988 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
8989
8990 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
8991 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8992 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
8993 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8994 {
8995 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8996 rcStrict = VINF_SUCCESS;
8997 }
8998 return rcStrict;
8999}
9000
9001
9002/**
9003 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9004 */
9005HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9006{
9007 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9008
9009 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9010 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9011 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9012 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9013 | CPUMCTX_EXTRN_HWVIRT
9014 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9015 AssertRCReturn(rc, rc);
9016
9017 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9018
9019 VMXVEXITINFO ExitInfo;
9020 RT_ZERO(ExitInfo);
9021 ExitInfo.uReason = pVmxTransient->uExitReason;
9022 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9023 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9024 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9025 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9026
9027 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9028 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9029 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9030 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9031 {
9032 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9033 rcStrict = VINF_SUCCESS;
9034 }
9035 return rcStrict;
9036}
9037
9038
9039/**
9040 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9041 */
9042HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9043{
9044 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9045
9046 /*
9047 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
 9048 * thus might not need to import the shadow VMCS state. However, it's safer to do so
 9049 * just in case code elsewhere dares look at unsynced VMCS fields.
9050 */
9051 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9052 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9053 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9054 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9055 | CPUMCTX_EXTRN_HWVIRT
9056 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9057 AssertRCReturn(rc, rc);
9058
9059 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9060
9061 VMXVEXITINFO ExitInfo;
9062 RT_ZERO(ExitInfo);
9063 ExitInfo.uReason = pVmxTransient->uExitReason;
9064 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9065 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9066 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
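 /* Only decode the effective address when the VMREAD destination is a memory operand; note that
    for VMREAD the memory operand is written to, hence VMXMEMACCESS_WRITE. */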
9067 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9068 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9069
9070 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9071 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9072 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9073 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9074 {
9075 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9076 rcStrict = VINF_SUCCESS;
9077 }
9078 return rcStrict;
9079}
9080
9081
9082/**
9083 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9084 */
9085HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9086{
9087 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9088
9089 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
9090 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9091 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9092 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9093 AssertRCReturn(rc, rc);
9094
9095 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9096
9097 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9098 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9099 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9100 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9101 {
9102 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9103 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9104 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9105 }
9106 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9107 return rcStrict;
9108}
9109
9110
9111/**
9112 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9113 */
9114HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9115{
9116 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9117
9118 /*
9119 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, since our HM hook
9120 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and it
9121 * flags re-loading the entire shadow VMCS, we should save the entire shadow VMCS here.
9122 */
9123 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9124 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9125 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9126 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9127 | CPUMCTX_EXTRN_HWVIRT
9128 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9129 AssertRCReturn(rc, rc);
9130
9131 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9132
9133 VMXVEXITINFO ExitInfo;
9134 RT_ZERO(ExitInfo);
9135 ExitInfo.uReason = pVmxTransient->uExitReason;
9136 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9137 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9138 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9139 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9140 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9141
9142 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9143 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9144 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9145 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9146 {
9147 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9148 rcStrict = VINF_SUCCESS;
9149 }
9150 return rcStrict;
9151}
9152
9153
9154/**
9155 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9156 */
9157HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9158{
9159 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9160
9161 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9162 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR4
9163 | CPUMCTX_EXTRN_HWVIRT
9164 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
9165 AssertRCReturn(rc, rc);
9166
9167 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9168
9169 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9170 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9171 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9172 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9173 {
9174 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9175 rcStrict = VINF_SUCCESS;
9176 }
9177 return rcStrict;
9178}
9179
9180
9181/**
9182 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9183 */
9184HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9185{
9186 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9187
9188 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9189 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9190 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9191 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9192 | CPUMCTX_EXTRN_HWVIRT
9193 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9194 AssertRCReturn(rc, rc);
9195
9196 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9197
9198 VMXVEXITINFO ExitInfo;
9199 RT_ZERO(ExitInfo);
9200 ExitInfo.uReason = pVmxTransient->uExitReason;
9201 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9202 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9203 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9204 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9205
9206 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9207 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9208 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9209 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9210 {
9211 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9212 rcStrict = VINF_SUCCESS;
9213 }
9214 return rcStrict;
9215}
9216
9217
9218/**
9219 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9220 */
9221HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9222{
9223 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9224
9225 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9226 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9227 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9228 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9229 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9230 AssertRCReturn(rc, rc);
9231
9232 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9233
9234 VMXVEXITINFO ExitInfo;
9235 RT_ZERO(ExitInfo);
9236 ExitInfo.uReason = pVmxTransient->uExitReason;
9237 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9238 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9239 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9240 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9241
9242 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9243 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9244 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9245 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9246 {
9247 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9248 rcStrict = VINF_SUCCESS;
9249 }
9250 return rcStrict;
9251}
9252#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9253/** @} */
9254
9255
9256#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9257/** @name Nested-guest VM-exit handlers.
9258 * @{
9259 */
9260/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9261/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9262/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9263
9264/**
9265 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9266 * Conditional VM-exit.
9267 */
9268HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9269{
9270 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9271
9272 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
9273
9274 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9275 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9276 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9277
9278 switch (uExitIntType)
9279 {
9280#ifndef IN_NEM_DARWIN
9281 /*
9282 * Physical NMIs:
9283 * We shouldn't direct host physical NMIs to the nested-guest. Dispatch it to the host.
9284 */
9285 case VMX_EXIT_INT_INFO_TYPE_NMI:
9286 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9287#endif
9288
9289 /*
9290 * Hardware exceptions,
9291 * Software exceptions,
9292 * Privileged software exceptions:
9293 * Figure out if the exception must be delivered to the guest or the nested-guest.
9294 */
9295 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9296 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9297 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9298 {
9299 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
9300 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9301 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9302 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9303
9304 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9305 bool const fIntercept = CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo),
9306 pVmxTransient->uExitIntErrorCode);
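 /* CPUMIsGuestVmxXcptInterceptSet consults the nested-guest exception bitmap (and, for #PF, the
    page-fault error-code mask/match controls, which is why the error code is passed in) to decide
    whether the nested hypervisor wants this exception. */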
9307 if (fIntercept)
9308 {
9309 /* Exit qualification is required for debug and page-fault exceptions. */
9310 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9311
9312 /*
9313 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9314 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9315 * length. However, if delivery of a software interrupt, software exception or privileged
9316 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
9317 */
9318 VMXVEXITINFO ExitInfo;
9319 RT_ZERO(ExitInfo);
9320 ExitInfo.uReason = pVmxTransient->uExitReason;
9321 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9322 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9323
9324 VMXVEXITEVENTINFO ExitEventInfo;
9325 RT_ZERO(ExitEventInfo);
9326 ExitEventInfo.uExitIntInfo = pVmxTransient->uExitIntInfo;
9327 ExitEventInfo.uExitIntErrCode = pVmxTransient->uExitIntErrorCode;
9328 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9329 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9330
9331#ifdef DEBUG_ramshankar
9332 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9333 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n", pVmxTransient->uExitIntInfo,
9334 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
9335 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9336 {
9337 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n", pVmxTransient->uIdtVectoringInfo,
9338 pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
9339 }
9340#endif
9341 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
9342 }
9343
 9344 /* Nested paging is currently a requirement; otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
9345 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9346 return vmxHCExitXcpt(pVCpu, pVmxTransient);
9347 }
9348
9349 /*
9350 * Software interrupts:
9351 * VM-exits cannot be caused by software interrupts.
9352 *
9353 * External interrupts:
9354 * This should only happen when "acknowledge external interrupts on VM-exit"
9355 * control is set. However, we never set this when executing a guest or
9356 * nested-guest. For nested-guests it is emulated while injecting interrupts into
9357 * the guest.
9358 */
9359 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
9360 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
9361 default:
9362 {
9363 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
9364 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
9365 }
9366 }
9367}
9368
9369
9370/**
9371 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
9372 * Unconditional VM-exit.
9373 */
9374HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9375{
9376 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9377 return IEMExecVmxVmexitTripleFault(pVCpu);
9378}
9379
9380
9381/**
9382 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
9383 */
9384HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9385{
9386 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9387
9388 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
9389 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9390 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9391}
9392
9393
9394/**
9395 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
9396 */
9397HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9398{
9399 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9400
9401 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
9402 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9403 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9404}
9405
9406
9407/**
9408 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
9409 * Unconditional VM-exit.
9410 */
9411HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9412{
9413 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9414
9415 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9416 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9417 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9418 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9419
9420 VMXVEXITINFO ExitInfo;
9421 RT_ZERO(ExitInfo);
9422 ExitInfo.uReason = pVmxTransient->uExitReason;
9423 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9424 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9425
9426 VMXVEXITEVENTINFO ExitEventInfo;
9427 RT_ZERO(ExitEventInfo);
9428 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9429 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9430 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
9431}
9432
9433
9434/**
9435 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
9436 */
9437HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9438{
9439 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9440
9441 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
9442 {
9443 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9444 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9445 }
9446 return vmxHCExitHlt(pVCpu, pVmxTransient);
9447}
9448
9449
9450/**
9451 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
9452 */
9453HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9454{
9455 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9456
9457 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
9458 {
9459 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9460 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9461
9462 VMXVEXITINFO ExitInfo;
9463 RT_ZERO(ExitInfo);
9464 ExitInfo.uReason = pVmxTransient->uExitReason;
9465 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9466 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9467 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9468 }
9469 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
9470}
9471
9472
9473/**
9474 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
9475 */
9476HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9477{
9478 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9479
9480 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
9481 {
9482 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9483 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9484 }
9485 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
9486}
9487
9488
9489/**
9490 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
9491 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
9492 */
9493HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9494{
9495 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9496
9497 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
9498 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
9499
9500 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9501
9502 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
9503 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9504 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9505
9506 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
9507 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
9508 u64VmcsField &= UINT64_C(0xffffffff);
9509
9510 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
9511 {
9512 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9513 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9514
9515 VMXVEXITINFO ExitInfo;
9516 RT_ZERO(ExitInfo);
9517 ExitInfo.uReason = pVmxTransient->uExitReason;
9518 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9519 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9520 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
9521 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9522 }
9523
9524 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
9525 return vmxHCExitVmread(pVCpu, pVmxTransient);
9526 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
9527}
9528
9529
9530/**
9531 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
9532 */
9533HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9534{
9535 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9536
9537 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
9538 {
9539 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9540 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9541 }
9542
9543 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
9544}
9545
9546
9547/**
9548 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
9549 * Conditional VM-exit.
9550 */
9551HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9552{
9553 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9554
9555 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9556 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9557
9558 VBOXSTRICTRC rcStrict;
9559 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
9560 switch (uAccessType)
9561 {
9562 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
9563 {
9564 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9565 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9566 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9567 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9568
9569 bool fIntercept;
9570 switch (iCrReg)
9571 {
9572 case 0:
9573 case 4:
9574 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
9575 break;
9576
9577 case 3:
9578 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
9579 break;
9580
9581 case 8:
9582 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
9583 break;
9584
9585 default:
9586 fIntercept = false;
9587 break;
9588 }
9589 if (fIntercept)
9590 {
9591 VMXVEXITINFO ExitInfo;
9592 RT_ZERO(ExitInfo);
9593 ExitInfo.uReason = pVmxTransient->uExitReason;
9594 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9595 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9596 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9597 }
9598 else
9599 {
9600 int const rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
9601 AssertRCReturn(rc, rc);
9602 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9603 }
9604 break;
9605 }
9606
9607 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
9608 {
9609 /*
9610 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
9611 * CR2 reads do not cause a VM-exit.
9612 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
9613 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
9614 */
9615 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9616 if ( iCrReg == 3
9617 || iCrReg == 8)
9618 {
9619 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
9620 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
9621 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
9622 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
9623 {
9624 VMXVEXITINFO ExitInfo;
9625 RT_ZERO(ExitInfo);
9626 ExitInfo.uReason = pVmxTransient->uExitReason;
9627 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9628 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9629 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9630 }
9631 else
9632 {
9633 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9634 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9635 }
9636 }
9637 else
9638 {
9639 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
9640 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
9641 }
9642 break;
9643 }
9644
9645 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
9646 {
9647 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
9648 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
9649 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
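 /* CLTS intercept rule: the nested hypervisor only sees a VM-exit when it owns CR0.TS (the
    guest/host mask bit is set) and the CR0 read shadow has TS set; otherwise we handle the CLTS
    on behalf of the guest below. */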
9650 if ( (uGstHostMask & X86_CR0_TS)
9651 && (uReadShadow & X86_CR0_TS))
9652 {
9653 VMXVEXITINFO ExitInfo;
9654 RT_ZERO(ExitInfo);
9655 ExitInfo.uReason = pVmxTransient->uExitReason;
9656 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9657 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9658 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9659 }
9660 else
9661 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
9662 break;
9663 }
9664
9665 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
9666 {
9667 RTGCPTR GCPtrEffDst;
9668 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
9669 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
9670 if (fMemOperand)
9671 {
9672 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9673 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
9674 }
9675 else
9676 GCPtrEffDst = NIL_RTGCPTR;
9677
9678 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
9679 {
9680 VMXVEXITINFO ExitInfo;
9681 RT_ZERO(ExitInfo);
9682 ExitInfo.uReason = pVmxTransient->uExitReason;
9683 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9684 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
9685 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9686 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9687 }
9688 else
9689 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
9690 break;
9691 }
9692
9693 default:
9694 {
9695 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
9696 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
9697 }
9698 }
9699
9700 if (rcStrict == VINF_IEM_RAISED_XCPT)
9701 {
9702 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9703 rcStrict = VINF_SUCCESS;
9704 }
9705 return rcStrict;
9706}
9707
9708
9709/**
9710 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
9711 * Conditional VM-exit.
9712 */
9713HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9714{
9715 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9716
9717 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
9718 {
9719 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9720 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9721
9722 VMXVEXITINFO ExitInfo;
9723 RT_ZERO(ExitInfo);
9724 ExitInfo.uReason = pVmxTransient->uExitReason;
9725 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9726 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9727 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9728 }
9729 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
9730}
9731
9732
9733/**
9734 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
9735 * Conditional VM-exit.
9736 */
9737HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9738{
9739 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9740
9741 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9742
9743 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
9744 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
9745 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
9746
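 /* The exit qualification encodes the access size as 0 (byte), 1 (word) or 3 (dword); the value 2
    is not used, hence the 0 placeholder in the size table below. */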
9747 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
9748 uint8_t const cbAccess = s_aIOSizes[uIOSize];
9749 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
9750 {
9751 /*
9752 * IN/OUT instruction:
9753 * - Provides VM-exit instruction length.
9754 *
9755 * INS/OUTS instruction:
9756 * - Provides VM-exit instruction length.
9757 * - Provides Guest-linear address.
9758 * - Optionally provides VM-exit instruction info (depends on CPU feature).
9759 */
9760 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9761 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9762
 9763 /* Make sure we don't use stale/uninitialized VMX-transient info below. */
9764 pVmxTransient->ExitInstrInfo.u = 0;
9765 pVmxTransient->uGuestLinearAddr = 0;
9766
9767 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
9768 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
9769 if (fIOString)
9770 {
9771 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9772 if (fVmxInsOutsInfo)
9773 {
9774 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
9775 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9776 }
9777 }
9778
9779 VMXVEXITINFO ExitInfo;
9780 RT_ZERO(ExitInfo);
9781 ExitInfo.uReason = pVmxTransient->uExitReason;
9782 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9783 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9784 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
9785 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
9786 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9787 }
9788 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
9789}
9790
9791
9792/**
9793 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
9794 */
9795HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9796{
9797 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9798
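 /* If the nested hypervisor uses MSR bitmaps, look up the permission for the MSR in ECX to see
    whether reading it is intercepted; without MSR bitmaps every RDMSR causes a nested VM-exit. */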
9799 uint32_t fMsrpm;
9800 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9801 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9802 else
9803 fMsrpm = VMXMSRPM_EXIT_RD;
9804
9805 if (fMsrpm & VMXMSRPM_EXIT_RD)
9806 {
9807 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9808 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9809 }
9810 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
9811}
9812
9813
9814/**
9815 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
9816 */
9817HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9818{
9819 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9820
9821 uint32_t fMsrpm;
9822 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9823 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9824 else
9825 fMsrpm = VMXMSRPM_EXIT_WR;
9826
9827 if (fMsrpm & VMXMSRPM_EXIT_WR)
9828 {
9829 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9830 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9831 }
9832 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
9833}
9834
9835
9836/**
9837 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
9838 */
9839HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9840{
9841 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9842
9843 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
9844 {
9845 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9846 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9847 }
9848 return vmxHCExitMwait(pVCpu, pVmxTransient);
9849}
9850
9851
9852/**
9853 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
9854 * VM-exit.
9855 */
9856HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9857{
9858 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9859
9860 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
9861 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
9862 VMXVEXITINFO ExitInfo;
9863 RT_ZERO(ExitInfo);
9864 ExitInfo.uReason = pVmxTransient->uExitReason;
9865 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
9866 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
9867}
9868
9869
9870/**
9871 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
9872 */
9873HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9874{
9875 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9876
9877 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
9878 {
9879 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9880 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9881 }
9882 return vmxHCExitMonitor(pVCpu, pVmxTransient);
9883}
9884
9885
9886/**
9887 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
9888 */
9889HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9890{
9891 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9892
9893 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
9894 * PAUSE when executing a nested-guest? If it does not, we would not need
9895 * to check for the intercepts here. Just call VM-exit... */
9896
9897 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
9898 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
9899 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
9900 {
9901 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9902 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9903 }
9904 return vmxHCExitPause(pVCpu, pVmxTransient);
9905}
9906
9907
9908/**
9909 * Nested-guest VM-exit handler for when the TPR value is lowered below the
9910 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
9911 */
9912HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9913{
9914 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9915
9916 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
9917 {
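 /* TPR-below-threshold is a trap-like VM-exit: read the guest's pending debug exceptions so they
    can be handed to the nested hypervisor as part of the VM-exit information. */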
9918 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
9919 VMXVEXITINFO ExitInfo;
9920 RT_ZERO(ExitInfo);
9921 ExitInfo.uReason = pVmxTransient->uExitReason;
9922 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
9923 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
9924 }
9925 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
9926}
9927
9928
9929/**
9930 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
9931 * VM-exit.
9932 */
9933HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9934{
9935 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9936
9937 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9938 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9939 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9940 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9941
9942 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
9943
9944 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
9945 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
9946
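 /* This exit is reflected to the nested hypervisor (which configured the virtual-APIC access
    page); forward the IDT-vectoring information so a pending event being delivered at the time of
    the access is not lost. */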
9947 VMXVEXITINFO ExitInfo;
9948 RT_ZERO(ExitInfo);
9949 ExitInfo.uReason = pVmxTransient->uExitReason;
9950 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9951 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9952
9953 VMXVEXITEVENTINFO ExitEventInfo;
9954 RT_ZERO(ExitEventInfo);
9955 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9956 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9957 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
9958}
9959
9960
9961/**
9962 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
9963 * Conditional VM-exit.
9964 */
9965HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9966{
9967 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9968
9969 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
9970 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9971 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
9972}
9973
9974
9975/**
9976 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
9977 * Conditional VM-exit.
9978 */
9979HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9980{
9981 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9982
9983 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
    vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
}


/**
 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
 */
HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

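    /* RDTSCP causes a nested VM-exit only when the nested hypervisor uses "RDTSC exiting"; the
       "enable RDTSCP" secondary control must also be set, otherwise the instruction would have
       raised #UD rather than exited. */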
    if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
    {
        Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
        vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
        return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
    }
    return vmxHCExitRdtscp(pVCpu, pVmxTransient);
}


/**
 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
 */
HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
    {
        vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
        return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
    }
    return vmxHCExitWbinvd(pVCpu, pVmxTransient);
}


/**
 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
 */
HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

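    /* INVPCID causes a nested VM-exit only when both "INVLPG exiting" and "enable INVPCID" are
       set for the nested guest; with "enable INVPCID" clear the instruction would #UD instead. */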
    if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
    {
        Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
        vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
        vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
        vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);

        VMXVEXITINFO ExitInfo;
        RT_ZERO(ExitInfo);
        ExitInfo.uReason = pVmxTransient->uExitReason;
        ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
        ExitInfo.u64Qual = pVmxTransient->uExitQual;
        ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
        return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
    }
    return vmxHCExitInvpcid(pVCpu, pVmxTransient);
}


/**
 * Nested-guest VM-exit handler for invalid-guest state
 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
 */
HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    /*
     * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
     * If it does happen, it most likely indicates a bug in the hardware-assisted VMX code.
     * Handle it as if the outer guest were in an invalid guest state.
     *
     * When the fast path is implemented, this should be changed to cause the corresponding
     * nested-guest VM-exit instead.
     */
    return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
}


/**
 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
 * and provide only the instruction length.
 *
 * Unconditional VM-exit.
 */
HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

#ifdef VBOX_STRICT
    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    switch (pVmxTransient->uExitReason)
    {
        case VMX_EXIT_ENCLS:
            Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
            break;

        case VMX_EXIT_VMFUNC:
            Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
            break;
    }
#endif

    vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
}


/**
 * Nested-guest VM-exit handler for instructions that provide the instruction length
 * as well as additional information.
 *
 * Unconditional VM-exit.
 */
HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

#ifdef VBOX_STRICT
    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    switch (pVmxTransient->uExitReason)
    {
        case VMX_EXIT_GDTR_IDTR_ACCESS:
        case VMX_EXIT_LDTR_TR_ACCESS:
            Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
            break;

        case VMX_EXIT_RDRAND:
            Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
            break;

        case VMX_EXIT_RDSEED:
            Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
            break;

        case VMX_EXIT_XSAVES:
        case VMX_EXIT_XRSTORS:
            /** @todo NSTVMX: Verify XSS-bitmap. */
            Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
            break;

        case VMX_EXIT_UMWAIT:
        case VMX_EXIT_TPAUSE:
            Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
            Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
            break;

        case VMX_EXIT_LOADIWKEY:
            Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
            break;
    }
#endif

    vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);

    VMXVEXITINFO ExitInfo;
    RT_ZERO(ExitInfo);
    ExitInfo.uReason = pVmxTransient->uExitReason;
    ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
    ExitInfo.u64Qual = pVmxTransient->uExitQual;
    ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
    return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
}

/** @} */
#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */