VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp@ 53442

Last change on this file since 53442 was 53325, checked in by vboxsync, 10 years ago

VMM: Fix sign inconsistency in the RealUseTSC case.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 505.0 KB
 
1/* $Id: HMVMXR0.cpp 53325 2014-11-14 13:46:38Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2012-2014 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_HM
22#include <iprt/x86.h>
23#include <iprt/asm-amd64-x86.h>
24#include <iprt/thread.h>
25
26#include "HMInternal.h"
27#include <VBox/vmm/vm.h>
28#include "HMVMXR0.h"
29#include <VBox/vmm/pdmapi.h>
30#include <VBox/vmm/dbgf.h>
31#include <VBox/vmm/iem.h>
32#include <VBox/vmm/iom.h>
33#include <VBox/vmm/selm.h>
34#include <VBox/vmm/tm.h>
35#include <VBox/vmm/gim.h>
36#ifdef VBOX_WITH_REM
37# include <VBox/vmm/rem.h>
38#endif
39#ifdef DEBUG_ramshankar
40# define HMVMX_ALWAYS_SAVE_GUEST_RFLAGS
41# define HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE
42# define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
43# define HMVMX_ALWAYS_CHECK_GUEST_STATE
44# define HMVMX_ALWAYS_TRAP_ALL_XCPTS
45# define HMVMX_ALWAYS_TRAP_PF
46# define HMVMX_ALWAYS_SWAP_FPU_STATE
47# define HMVMX_ALWAYS_FLUSH_TLB
48# define HMVMX_ALWAYS_SWAP_EFER
49#endif
50
51
52/*******************************************************************************
53* Defined Constants And Macros *
54*******************************************************************************/
55#if defined(RT_ARCH_AMD64)
56# define HMVMX_IS_64BIT_HOST_MODE() (true)
57typedef RTHCUINTREG HMVMXHCUINTREG;
58#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
59extern "C" uint32_t g_fVMXIs64bitHost;
60# define HMVMX_IS_64BIT_HOST_MODE() (g_fVMXIs64bitHost != 0)
61typedef uint64_t HMVMXHCUINTREG;
62#else
63# define HMVMX_IS_64BIT_HOST_MODE() (false)
64typedef RTHCUINTREG HMVMXHCUINTREG;
65#endif
66
67/** Use the function table. */
68#define HMVMX_USE_FUNCTION_TABLE
69
70/** Determine which tagged-TLB flush handler to use. */
71#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
72#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
73#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
74#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
75
76/** @name Updated-guest-state flags.
77 * @{ */
78#define HMVMX_UPDATED_GUEST_RIP RT_BIT(0)
79#define HMVMX_UPDATED_GUEST_RSP RT_BIT(1)
80#define HMVMX_UPDATED_GUEST_RFLAGS RT_BIT(2)
81#define HMVMX_UPDATED_GUEST_CR0 RT_BIT(3)
82#define HMVMX_UPDATED_GUEST_CR3 RT_BIT(4)
83#define HMVMX_UPDATED_GUEST_CR4 RT_BIT(5)
84#define HMVMX_UPDATED_GUEST_GDTR RT_BIT(6)
85#define HMVMX_UPDATED_GUEST_IDTR RT_BIT(7)
86#define HMVMX_UPDATED_GUEST_LDTR RT_BIT(8)
87#define HMVMX_UPDATED_GUEST_TR RT_BIT(9)
88#define HMVMX_UPDATED_GUEST_SEGMENT_REGS RT_BIT(10)
89#define HMVMX_UPDATED_GUEST_DEBUG RT_BIT(11)
90#define HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR RT_BIT(12)
91#define HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR RT_BIT(13)
92#define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR RT_BIT(14)
93#define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS RT_BIT(15)
94#define HMVMX_UPDATED_GUEST_LAZY_MSRS RT_BIT(16)
95#define HMVMX_UPDATED_GUEST_ACTIVITY_STATE RT_BIT(17)
96#define HMVMX_UPDATED_GUEST_INTR_STATE RT_BIT(18)
97#define HMVMX_UPDATED_GUEST_APIC_STATE RT_BIT(19)
98#define HMVMX_UPDATED_GUEST_ALL ( HMVMX_UPDATED_GUEST_RIP \
99 | HMVMX_UPDATED_GUEST_RSP \
100 | HMVMX_UPDATED_GUEST_RFLAGS \
101 | HMVMX_UPDATED_GUEST_CR0 \
102 | HMVMX_UPDATED_GUEST_CR3 \
103 | HMVMX_UPDATED_GUEST_CR4 \
104 | HMVMX_UPDATED_GUEST_GDTR \
105 | HMVMX_UPDATED_GUEST_IDTR \
106 | HMVMX_UPDATED_GUEST_LDTR \
107 | HMVMX_UPDATED_GUEST_TR \
108 | HMVMX_UPDATED_GUEST_SEGMENT_REGS \
109 | HMVMX_UPDATED_GUEST_DEBUG \
110 | HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR \
111 | HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR \
112 | HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR \
113 | HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS \
114 | HMVMX_UPDATED_GUEST_LAZY_MSRS \
115 | HMVMX_UPDATED_GUEST_ACTIVITY_STATE \
116 | HMVMX_UPDATED_GUEST_INTR_STATE \
117 | HMVMX_UPDATED_GUEST_APIC_STATE)
118/** @} */
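/*
 * A minimal usage sketch for these flags, assuming the fUpdatedGuestState member
 * declared in HMInternal.h (not part of this file); treat it as illustrative only:
 *
 *     pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RIP;  // RIP now synced from the VMCS
 *     if ((pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_ALL) == HMVMX_UPDATED_GUEST_ALL)
 *     {
 *         // the entire guest-CPU context has been read back from the VMCS
 *     }
 */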
119
120/** @name
121 * Flags to skip redundant reads of some common VMCS fields that are not part of
122 * the guest-CPU state but are in the transient structure.
123 */
124#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO RT_BIT(0)
125#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE RT_BIT(1)
126#define HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION RT_BIT(2)
127#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN RT_BIT(3)
128#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO RT_BIT(4)
129#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE RT_BIT(5)
130#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO RT_BIT(6)
131/** @} */
132
133/** @name
134 * States of the VMCS.
135 *
136 * This does not reflect all possible VMCS states but currently only those
137 * needed for maintaining the VMCS consistently even when thread-context hooks
138 * are used. Maybe later this can be extended (i.e. Nested Virtualization).
139 */
140#define HMVMX_VMCS_STATE_CLEAR RT_BIT(0)
141#define HMVMX_VMCS_STATE_ACTIVE RT_BIT(1)
142#define HMVMX_VMCS_STATE_LAUNCHED RT_BIT(2)
143/** @} */
144
145/**
146 * Exception bitmap mask for real-mode guests (real-on-v86).
147 *
148 * We need to intercept all exceptions manually (except #PF). #NM is also
149 * handled separately, see hmR0VmxLoadSharedCR0(). #PF need not be intercepted
150 * even in real-mode if we have Nested Paging support.
151 */
152#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) | RT_BIT(X86_XCPT_DB) | RT_BIT(X86_XCPT_NMI) \
153 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
154 | RT_BIT(X86_XCPT_UD) /* RT_BIT(X86_XCPT_NM) */ | RT_BIT(X86_XCPT_DF) \
155 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
156 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
157 | RT_BIT(X86_XCPT_MF) | RT_BIT(X86_XCPT_AC) | RT_BIT(X86_XCPT_MC) \
158 | RT_BIT(X86_XCPT_XF))
159
160/**
161 * Exception bitmap mask for all contributory exceptions.
162 *
163 * Page fault is deliberately excluded here as it's conditional as to whether
164 * it's contributory or benign. Page faults are handled separately.
165 */
166#define HMVMX_CONTRIBUTORY_XCPT_MASK ( RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
167 | RT_BIT(X86_XCPT_DE))
168
169/** Maximum VM-instruction error number. */
170#define HMVMX_INSTR_ERROR_MAX 28
171
172/** Profiling macro. */
173#ifdef HM_PROFILE_EXIT_DISPATCH
174# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
175# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
176#else
177# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
178# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
179#endif
180
181/** Assert that preemption is disabled or covered by thread-context hooks. */
182#define HMVMX_ASSERT_PREEMPT_SAFE() Assert( VMMR0ThreadCtxHooksAreRegistered(pVCpu) \
183 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));
184
185/** Assert that we haven't migrated CPUs when thread-context hooks are not
186 * used. */
187#define HMVMX_ASSERT_CPU_SAFE() AssertMsg( VMMR0ThreadCtxHooksAreRegistered(pVCpu) \
188 || pVCpu->hm.s.idEnteredCpu == RTMpCpuId(), \
189 ("Illegal migration! Entered on CPU %u Current %u\n", \
190 pVCpu->hm.s.idEnteredCpu, RTMpCpuId())); \
191
192/** Helper macro for VM-exit handlers called unexpectedly. */
193#define HMVMX_RETURN_UNEXPECTED_EXIT() \
194 do { \
195 pVCpu->hm.s.u32HMError = pVmxTransient->uExitReason; \
196 return VERR_VMX_UNEXPECTED_EXIT; \
197 } while (0)
198
199
200/*******************************************************************************
201* Structures and Typedefs *
202*******************************************************************************/
203/**
204 * VMX transient state.
205 *
206 * A state structure for holding miscellaneous information across
207 * VMX non-root operation and restored after the transition.
208 */
209typedef struct VMXTRANSIENT
210{
211 /** The host's rflags/eflags. */
212 RTCCUINTREG uEflags;
213#if HC_ARCH_BITS == 32
214 uint32_t u32Alignment0;
215#endif
216 /** The guest's TPR value used for TPR shadowing. */
217 uint8_t u8GuestTpr;
218 /** Alignment. */
219 uint8_t abAlignment0[7];
220
221 /** The basic VM-exit reason. */
222 uint16_t uExitReason;
223 /** Alignment. */
224 uint16_t u16Alignment0;
225 /** The VM-exit interruption error code. */
226 uint32_t uExitIntErrorCode;
227 /** The VM-exit exit code qualification. */
228 uint64_t uExitQualification;
229
230 /** The VM-exit interruption-information field. */
231 uint32_t uExitIntInfo;
232 /** The VM-exit instruction-length field. */
233 uint32_t cbInstr;
234 /** The VM-exit instruction-information field. */
235 union
236 {
237 /** Plain unsigned int representation. */
238 uint32_t u;
239 /** INS and OUTS information. */
240 struct
241 {
242 uint32_t u6Reserved0 : 7;
243 /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
244 uint32_t u3AddrSize : 3;
245 uint32_t u5Reserved1 : 5;
246 /** The segment register (X86_SREG_XXX). */
247 uint32_t iSegReg : 3;
248 uint32_t uReserved2 : 14;
249 } StrIo;
250 } ExitInstrInfo;
251 /** Whether the VM-entry failed or not. */
252 bool fVMEntryFailed;
253 /** Alignment. */
254 uint8_t abAlignment1[3];
255
256 /** The VM-entry interruption-information field. */
257 uint32_t uEntryIntInfo;
258 /** The VM-entry exception error code field. */
259 uint32_t uEntryXcptErrorCode;
260 /** The VM-entry instruction length field. */
261 uint32_t cbEntryInstr;
262
263 /** IDT-vectoring information field. */
264 uint32_t uIdtVectoringInfo;
265 /** IDT-vectoring error code. */
266 uint32_t uIdtVectoringErrorCode;
267
268 /** Mask of currently read VMCS fields; HMVMX_UPDATED_TRANSIENT_*. */
269 uint32_t fVmcsFieldsRead;
270
271 /** Whether the guest FPU was active at the time of VM-exit. */
272 bool fWasGuestFPUStateActive;
273 /** Whether the guest debug state was active at the time of VM-exit. */
274 bool fWasGuestDebugStateActive;
275 /** Whether the hyper debug state was active at the time of VM-exit. */
276 bool fWasHyperDebugStateActive;
277 /** Whether TSC-offsetting should be setup before VM-entry. */
278 bool fUpdateTscOffsettingAndPreemptTimer;
279 /** Whether the VM-exit was caused by a page-fault during delivery of a
280 * contributory exception or a page-fault. */
281 bool fVectoringDoublePF;
282 /** Whether the VM-exit was caused by a page-fault during delivery of an
283 * external interrupt or NMI. */
284 bool fVectoringPF;
285} VMXTRANSIENT;
286AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, sizeof(uint64_t));
287AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntInfo, sizeof(uint64_t));
288AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntInfo, sizeof(uint64_t));
289AssertCompileMemberAlignment(VMXTRANSIENT, fWasGuestFPUStateActive, sizeof(uint64_t));
290AssertCompileMemberSize(VMXTRANSIENT, ExitInstrInfo, sizeof(uint32_t));
291/** Pointer to VMX transient state. */
292typedef VMXTRANSIENT *PVMXTRANSIENT;
293
294
295/**
296 * MSR-bitmap read permissions.
297 */
298typedef enum VMXMSREXITREAD
299{
300 /** Reading this MSR causes a VM-exit. */
301 VMXMSREXIT_INTERCEPT_READ = 0xb,
302 /** Reading this MSR does not cause a VM-exit. */
303 VMXMSREXIT_PASSTHRU_READ
304} VMXMSREXITREAD;
305/** Pointer to MSR-bitmap read permissions. */
306typedef VMXMSREXITREAD* PVMXMSREXITREAD;
307
308/**
309 * MSR-bitmap write permissions.
310 */
311typedef enum VMXMSREXITWRITE
312{
313 /** Writing to this MSR causes a VM-exit. */
314 VMXMSREXIT_INTERCEPT_WRITE = 0xd,
315 /** Writing to this MSR does not cause a VM-exit. */
316 VMXMSREXIT_PASSTHRU_WRITE
317} VMXMSREXITWRITE;
318/** Pointer to MSR-bitmap write permissions. */
319typedef VMXMSREXITWRITE* PVMXMSREXITWRITE;
320
321
322/**
323 * VMX VM-exit handler.
324 *
325 * @returns VBox status code.
326 * @param pVCpu Pointer to the VMCPU.
327 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
328 * out-of-sync. Make sure to update the required
329 * fields before using them.
330 * @param pVmxTransient Pointer to the VMX-transient structure.
331 */
332#ifndef HMVMX_USE_FUNCTION_TABLE
333typedef int FNVMXEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
334#else
335typedef DECLCALLBACK(int) FNVMXEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
336/** Pointer to VM-exit handler. */
337typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
338#endif
339
340
341/*******************************************************************************
342* Internal Functions *
343*******************************************************************************/
344static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXFLUSHEPT enmFlush);
345static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr);
346static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
347 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress,
348 bool fStepping, uint32_t *puIntState);
349#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
350static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
351#endif
352#ifndef HMVMX_USE_FUNCTION_TABLE
353DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
354# define HMVMX_EXIT_DECL static int
355#else
356# define HMVMX_EXIT_DECL static DECLCALLBACK(int)
357#endif
358DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitStep(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
359 uint32_t uExitReason, uint16_t uCsStart, uint64_t uRipStart);
360
361/** @name VM-exit handlers.
362 * @{
363 */
364static FNVMXEXITHANDLER hmR0VmxExitXcptOrNmi;
365static FNVMXEXITHANDLER hmR0VmxExitExtInt;
366static FNVMXEXITHANDLER hmR0VmxExitTripleFault;
367static FNVMXEXITHANDLER hmR0VmxExitInitSignal;
368static FNVMXEXITHANDLER hmR0VmxExitSipi;
369static FNVMXEXITHANDLER hmR0VmxExitIoSmi;
370static FNVMXEXITHANDLER hmR0VmxExitSmi;
371static FNVMXEXITHANDLER hmR0VmxExitIntWindow;
372static FNVMXEXITHANDLER hmR0VmxExitNmiWindow;
373static FNVMXEXITHANDLER hmR0VmxExitTaskSwitch;
374static FNVMXEXITHANDLER hmR0VmxExitCpuid;
375static FNVMXEXITHANDLER hmR0VmxExitGetsec;
376static FNVMXEXITHANDLER hmR0VmxExitHlt;
377static FNVMXEXITHANDLER hmR0VmxExitInvd;
378static FNVMXEXITHANDLER hmR0VmxExitInvlpg;
379static FNVMXEXITHANDLER hmR0VmxExitRdpmc;
380static FNVMXEXITHANDLER hmR0VmxExitVmcall;
381static FNVMXEXITHANDLER hmR0VmxExitRdtsc;
382static FNVMXEXITHANDLER hmR0VmxExitRsm;
383static FNVMXEXITHANDLER hmR0VmxExitSetPendingXcptUD;
384static FNVMXEXITHANDLER hmR0VmxExitMovCRx;
385static FNVMXEXITHANDLER hmR0VmxExitMovDRx;
386static FNVMXEXITHANDLER hmR0VmxExitIoInstr;
387static FNVMXEXITHANDLER hmR0VmxExitRdmsr;
388static FNVMXEXITHANDLER hmR0VmxExitWrmsr;
389static FNVMXEXITHANDLER hmR0VmxExitErrInvalidGuestState;
390static FNVMXEXITHANDLER hmR0VmxExitErrMsrLoad;
391static FNVMXEXITHANDLER hmR0VmxExitErrUndefined;
392static FNVMXEXITHANDLER hmR0VmxExitMwait;
393static FNVMXEXITHANDLER hmR0VmxExitMtf;
394static FNVMXEXITHANDLER hmR0VmxExitMonitor;
395static FNVMXEXITHANDLER hmR0VmxExitPause;
396static FNVMXEXITHANDLER hmR0VmxExitErrMachineCheck;
397static FNVMXEXITHANDLER hmR0VmxExitTprBelowThreshold;
398static FNVMXEXITHANDLER hmR0VmxExitApicAccess;
399static FNVMXEXITHANDLER hmR0VmxExitXdtrAccess;
400static FNVMXEXITHANDLER hmR0VmxExitXdtrAccess;
401static FNVMXEXITHANDLER hmR0VmxExitEptViolation;
402static FNVMXEXITHANDLER hmR0VmxExitEptMisconfig;
403static FNVMXEXITHANDLER hmR0VmxExitRdtscp;
404static FNVMXEXITHANDLER hmR0VmxExitPreemptTimer;
405static FNVMXEXITHANDLER hmR0VmxExitWbinvd;
406static FNVMXEXITHANDLER hmR0VmxExitXsetbv;
407static FNVMXEXITHANDLER hmR0VmxExitRdrand;
408static FNVMXEXITHANDLER hmR0VmxExitInvpcid;
409/** @} */
410
411static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
412static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
413static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
414static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
415static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
416static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
417#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
418static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
419#endif
420static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
421
422/*******************************************************************************
423* Global Variables *
424*******************************************************************************/
425#ifdef HMVMX_USE_FUNCTION_TABLE
426
427/**
428 * VMX_EXIT dispatch table.
429 */
430static const PFNVMXEXITHANDLER g_apfnVMExitHandlers[VMX_EXIT_MAX + 1] =
431{
432 /* 00 VMX_EXIT_XCPT_OR_NMI */ hmR0VmxExitXcptOrNmi,
433 /* 01 VMX_EXIT_EXT_INT */ hmR0VmxExitExtInt,
434 /* 02 VMX_EXIT_TRIPLE_FAULT */ hmR0VmxExitTripleFault,
435 /* 03 VMX_EXIT_INIT_SIGNAL */ hmR0VmxExitInitSignal,
436 /* 04 VMX_EXIT_SIPI */ hmR0VmxExitSipi,
437 /* 05 VMX_EXIT_IO_SMI */ hmR0VmxExitIoSmi,
438 /* 06 VMX_EXIT_SMI */ hmR0VmxExitSmi,
439 /* 07 VMX_EXIT_INT_WINDOW */ hmR0VmxExitIntWindow,
440 /* 08 VMX_EXIT_NMI_WINDOW */ hmR0VmxExitNmiWindow,
441 /* 09 VMX_EXIT_TASK_SWITCH */ hmR0VmxExitTaskSwitch,
442 /* 10 VMX_EXIT_CPUID */ hmR0VmxExitCpuid,
443 /* 11 VMX_EXIT_GETSEC */ hmR0VmxExitGetsec,
444 /* 12 VMX_EXIT_HLT */ hmR0VmxExitHlt,
445 /* 13 VMX_EXIT_INVD */ hmR0VmxExitInvd,
446 /* 14 VMX_EXIT_INVLPG */ hmR0VmxExitInvlpg,
447 /* 15 VMX_EXIT_RDPMC */ hmR0VmxExitRdpmc,
448 /* 16 VMX_EXIT_RDTSC */ hmR0VmxExitRdtsc,
449 /* 17 VMX_EXIT_RSM */ hmR0VmxExitRsm,
450 /* 18 VMX_EXIT_VMCALL */ hmR0VmxExitVmcall,
451 /* 19 VMX_EXIT_VMCLEAR */ hmR0VmxExitSetPendingXcptUD,
452 /* 20 VMX_EXIT_VMLAUNCH */ hmR0VmxExitSetPendingXcptUD,
453 /* 21 VMX_EXIT_VMPTRLD */ hmR0VmxExitSetPendingXcptUD,
454 /* 22 VMX_EXIT_VMPTRST */ hmR0VmxExitSetPendingXcptUD,
455 /* 23 VMX_EXIT_VMREAD */ hmR0VmxExitSetPendingXcptUD,
456 /* 24 VMX_EXIT_VMRESUME */ hmR0VmxExitSetPendingXcptUD,
457 /* 25 VMX_EXIT_VMWRITE */ hmR0VmxExitSetPendingXcptUD,
458 /* 26 VMX_EXIT_VMXOFF */ hmR0VmxExitSetPendingXcptUD,
459 /* 27 VMX_EXIT_VMXON */ hmR0VmxExitSetPendingXcptUD,
460 /* 28 VMX_EXIT_MOV_CRX */ hmR0VmxExitMovCRx,
461 /* 29 VMX_EXIT_MOV_DRX */ hmR0VmxExitMovDRx,
462 /* 30 VMX_EXIT_IO_INSTR */ hmR0VmxExitIoInstr,
463 /* 31 VMX_EXIT_RDMSR */ hmR0VmxExitRdmsr,
464 /* 32 VMX_EXIT_WRMSR */ hmR0VmxExitWrmsr,
465 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ hmR0VmxExitErrInvalidGuestState,
466 /* 34 VMX_EXIT_ERR_MSR_LOAD */ hmR0VmxExitErrMsrLoad,
467 /* 35 UNDEFINED */ hmR0VmxExitErrUndefined,
468 /* 36 VMX_EXIT_MWAIT */ hmR0VmxExitMwait,
469 /* 37 VMX_EXIT_MTF */ hmR0VmxExitMtf,
470 /* 38 UNDEFINED */ hmR0VmxExitErrUndefined,
471 /* 39 VMX_EXIT_MONITOR */ hmR0VmxExitMonitor,
 472 /* 40 VMX_EXIT_PAUSE */ hmR0VmxExitPause,
 473 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ hmR0VmxExitErrMachineCheck,
 474 /* 42 UNDEFINED */ hmR0VmxExitErrUndefined,
475 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ hmR0VmxExitTprBelowThreshold,
476 /* 44 VMX_EXIT_APIC_ACCESS */ hmR0VmxExitApicAccess,
477 /* 45 UNDEFINED */ hmR0VmxExitErrUndefined,
478 /* 46 VMX_EXIT_XDTR_ACCESS */ hmR0VmxExitXdtrAccess,
479 /* 47 VMX_EXIT_TR_ACCESS */ hmR0VmxExitXdtrAccess,
480 /* 48 VMX_EXIT_EPT_VIOLATION */ hmR0VmxExitEptViolation,
481 /* 49 VMX_EXIT_EPT_MISCONFIG */ hmR0VmxExitEptMisconfig,
482 /* 50 VMX_EXIT_INVEPT */ hmR0VmxExitSetPendingXcptUD,
483 /* 51 VMX_EXIT_RDTSCP */ hmR0VmxExitRdtscp,
484 /* 52 VMX_EXIT_PREEMPT_TIMER */ hmR0VmxExitPreemptTimer,
485 /* 53 VMX_EXIT_INVVPID */ hmR0VmxExitSetPendingXcptUD,
486 /* 54 VMX_EXIT_WBINVD */ hmR0VmxExitWbinvd,
487 /* 55 VMX_EXIT_XSETBV */ hmR0VmxExitXsetbv,
488 /* 56 UNDEFINED */ hmR0VmxExitErrUndefined,
489 /* 57 VMX_EXIT_RDRAND */ hmR0VmxExitRdrand,
490 /* 58 VMX_EXIT_INVPCID */ hmR0VmxExitInvpcid,
491 /* 59 VMX_EXIT_VMFUNC */ hmR0VmxExitSetPendingXcptUD
492};
493#endif /* HMVMX_USE_FUNCTION_TABLE */
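/*
 * A brief sketch of how this table is meant to be dispatched: the basic exit reason
 * read out of the VMCS indexes directly into the array. The bounds check and the
 * exact call site here are assumptions; the real dispatch is in the run-loop code
 * further down in this file and may differ in detail:
 *
 *     Assert(pVmxTransient->uExitReason <= VMX_EXIT_MAX);
 *     int rc = g_apfnVMExitHandlers[pVmxTransient->uExitReason](pVCpu, pMixedCtx, pVmxTransient);
 */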
494
495#ifdef VBOX_STRICT
496static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
497{
498 /* 0 */ "(Not Used)",
499 /* 1 */ "VMCALL executed in VMX root operation.",
500 /* 2 */ "VMCLEAR with invalid physical address.",
501 /* 3 */ "VMCLEAR with VMXON pointer.",
502 /* 4 */ "VMLAUNCH with non-clear VMCS.",
503 /* 5 */ "VMRESUME with non-launched VMCS.",
504 /* 6 */ "VMRESUME after VMXOFF",
505 /* 7 */ "VM-entry with invalid control fields.",
506 /* 8 */ "VM-entry with invalid host state fields.",
507 /* 9 */ "VMPTRLD with invalid physical address.",
508 /* 10 */ "VMPTRLD with VMXON pointer.",
509 /* 11 */ "VMPTRLD with incorrect revision identifier.",
510 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
511 /* 13 */ "VMWRITE to read-only VMCS component.",
512 /* 14 */ "(Not Used)",
513 /* 15 */ "VMXON executed in VMX root operation.",
514 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
515 /* 17 */ "VM-entry with non-launched executing VMCS.",
516 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
517 /* 19 */ "VMCALL with non-clear VMCS.",
518 /* 20 */ "VMCALL with invalid VM-exit control fields.",
519 /* 21 */ "(Not Used)",
520 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
521 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
522 /* 24 */ "VMCALL with invalid SMM-monitor features.",
523 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
524 /* 26 */ "VM-entry with events blocked by MOV SS.",
525 /* 27 */ "(Not Used)",
526 /* 28 */ "Invalid operand to INVEPT/INVVPID."
527};
528#endif /* VBOX_STRICT */
529
530
531
532/**
533 * Updates the VM's last error record. If there was a VMX instruction error,
534 * reads the error data from the VMCS and updates VCPU's last error record as
535 * well.
536 *
537 * @param pVM Pointer to the VM.
538 * @param pVCpu Pointer to the VMCPU (can be NULL if @a rc is not
539 * VERR_VMX_UNABLE_TO_START_VM or
540 * VERR_VMX_INVALID_VMCS_FIELD).
541 * @param rc The error code.
542 */
543static void hmR0VmxUpdateErrorRecord(PVM pVM, PVMCPU pVCpu, int rc)
544{
545 AssertPtr(pVM);
546 if ( rc == VERR_VMX_INVALID_VMCS_FIELD
547 || rc == VERR_VMX_UNABLE_TO_START_VM)
548 {
549 AssertPtrReturnVoid(pVCpu);
550 VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
551 }
552 pVM->hm.s.lLastError = rc;
553}
554
555
556/**
557 * Reads the VM-entry interruption-information field from the VMCS into the VMX
558 * transient structure.
559 *
560 * @returns VBox status code.
561 * @param pVmxTransient Pointer to the VMX transient structure.
562 *
563 * @remarks No-long-jump zone!!!
564 */
565DECLINLINE(int) hmR0VmxReadEntryIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
566{
567 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
568 AssertRCReturn(rc, rc);
569 return VINF_SUCCESS;
570}
571
572
573/**
574 * Reads the VM-entry exception error code field from the VMCS into
575 * the VMX transient structure.
576 *
577 * @returns VBox status code.
578 * @param pVmxTransient Pointer to the VMX transient structure.
579 *
580 * @remarks No-long-jump zone!!!
581 */
582DECLINLINE(int) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
583{
584 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
585 AssertRCReturn(rc, rc);
586 return VINF_SUCCESS;
587}
588
589
590/**
591 * Reads the VM-entry instruction length field from the VMCS into
592 * the VMX transient structure.
593 *
594 * @returns VBox status code.
595 * @param pVmxTransient Pointer to the VMX transient structure.
596 *
597 * @remarks No-long-jump zone!!!
598 */
599DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
600{
601 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
602 AssertRCReturn(rc, rc);
603 return VINF_SUCCESS;
604}
605
606
607/**
608 * Reads the VM-exit interruption-information field from the VMCS into the VMX
609 * transient structure.
610 *
611 * @returns VBox status code.
612 * @param pVmxTransient Pointer to the VMX transient structure.
613 */
614DECLINLINE(int) hmR0VmxReadExitIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
615{
616 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO))
617 {
618 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
619 AssertRCReturn(rc, rc);
620 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO;
621 }
622 return VINF_SUCCESS;
623}
624
625
626/**
627 * Reads the VM-exit interruption error code from the VMCS into the VMX
628 * transient structure.
629 *
630 * @returns VBox status code.
631 * @param pVmxTransient Pointer to the VMX transient structure.
632 */
633DECLINLINE(int) hmR0VmxReadExitIntErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
634{
635 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE))
636 {
637 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
638 AssertRCReturn(rc, rc);
639 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE;
640 }
641 return VINF_SUCCESS;
642}
643
644
645/**
646 * Reads the VM-exit instruction length field from the VMCS into the VMX
647 * transient structure.
648 *
649 * @returns VBox status code.
650 * @param pVCpu Pointer to the VMCPU.
651 * @param pVmxTransient Pointer to the VMX transient structure.
652 */
653DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
654{
655 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN))
656 {
657 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
658 AssertRCReturn(rc, rc);
659 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN;
660 }
661 return VINF_SUCCESS;
662}
663
664
665/**
666 * Reads the VM-exit instruction-information field from the VMCS into
667 * the VMX transient structure.
668 *
669 * @returns VBox status code.
670 * @param pVmxTransient Pointer to the VMX transient structure.
671 */
672DECLINLINE(int) hmR0VmxReadExitInstrInfoVmcs(PVMXTRANSIENT pVmxTransient)
673{
674 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO))
675 {
676 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
677 AssertRCReturn(rc, rc);
678 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO;
679 }
680 return VINF_SUCCESS;
681}
682
683
684/**
685 * Reads the exit code qualification from the VMCS into the VMX transient
686 * structure.
687 *
688 * @returns VBox status code.
689 * @param pVCpu Pointer to the VMCPU (required for the VMCS cache
690 * case).
691 * @param pVmxTransient Pointer to the VMX transient structure.
692 */
693DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
694{
695 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION))
696 {
697 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification); NOREF(pVCpu);
698 AssertRCReturn(rc, rc);
699 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION;
700 }
701 return VINF_SUCCESS;
702}
703
704
705/**
706 * Reads the IDT-vectoring information field from the VMCS into the VMX
707 * transient structure.
708 *
709 * @returns VBox status code.
710 * @param pVmxTransient Pointer to the VMX transient structure.
711 *
712 * @remarks No-long-jump zone!!!
713 */
714DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
715{
716 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO))
717 {
718 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_INFO, &pVmxTransient->uIdtVectoringInfo);
719 AssertRCReturn(rc, rc);
720 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO;
721 }
722 return VINF_SUCCESS;
723}
724
725
726/**
727 * Reads the IDT-vectoring error code from the VMCS into the VMX
728 * transient structure.
729 *
730 * @returns VBox status code.
731 * @param pVmxTransient Pointer to the VMX transient structure.
732 */
733DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
734{
735 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE))
736 {
737 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
738 AssertRCReturn(rc, rc);
739 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE;
740 }
741 return VINF_SUCCESS;
742}
743
744
745/**
746 * Enters VMX root mode operation on the current CPU.
747 *
748 * @returns VBox status code.
749 * @param pVM Pointer to the VM (optional, can be NULL, after
750 * a resume).
751 * @param HCPhysCpuPage Physical address of the VMXON region.
752 * @param pvCpuPage Pointer to the VMXON region.
753 */
754static int hmR0VmxEnterRootMode(PVM pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
755{
756 Assert(HCPhysCpuPage && HCPhysCpuPage != NIL_RTHCPHYS);
757 Assert(RT_ALIGN_T(HCPhysCpuPage, _4K, RTHCPHYS) == HCPhysCpuPage);
758 Assert(pvCpuPage);
759 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
760
761 if (pVM)
762 {
763 /* Write the VMCS revision dword to the VMXON region. */
764 *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.Msrs.u64BasicInfo);
765 }
766
767 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
768 RTCCUINTREG uEflags = ASMIntDisableFlags();
769
770 /* Enable the VMX bit in CR4 if necessary. */
771 RTCCUINTREG uCr4 = ASMGetCR4();
772 if (!(uCr4 & X86_CR4_VMXE))
773 ASMSetCR4(uCr4 | X86_CR4_VMXE);
774
775 /* Enter VMX root mode. */
776 int rc = VMXEnable(HCPhysCpuPage);
777 if (RT_FAILURE(rc))
778 ASMSetCR4(uCr4);
779
780 /* Restore interrupts. */
781 ASMSetFlags(uEflags);
782 return rc;
783}
784
785
786/**
787 * Exits VMX root mode operation on the current CPU.
788 *
789 * @returns VBox status code.
790 */
791static int hmR0VmxLeaveRootMode(void)
792{
793 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
794
795 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
796 RTCCUINTREG uEflags = ASMIntDisableFlags();
797
798 /* If we're for some reason not in VMX root mode, then don't leave it. */
799 RTCCUINTREG uHostCR4 = ASMGetCR4();
800
801 int rc;
802 if (uHostCR4 & X86_CR4_VMXE)
803 {
804 /* Exit VMX root mode and clear the VMX bit in CR4. */
805 VMXDisable();
806 ASMSetCR4(uHostCR4 & ~X86_CR4_VMXE);
807 rc = VINF_SUCCESS;
808 }
809 else
810 rc = VERR_VMX_NOT_IN_VMX_ROOT_MODE;
811
812 /* Restore interrupts. */
813 ASMSetFlags(uEflags);
814 return rc;
815}
816
817
818/**
819 * Allocates and maps one physically contiguous page. The allocated page is
820 * zero'd out. (Used by various VT-x structures).
821 *
822 * @returns IPRT status code.
823 * @param pMemObj Pointer to the ring-0 memory object.
824 * @param ppVirt Where to store the virtual address of the
825 * allocation.
826 * @param pHCPhys Where to store the physical address of the
827 * allocation.
828 */
829DECLINLINE(int) hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
830{
831 AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER);
832 AssertPtrReturn(ppVirt, VERR_INVALID_PARAMETER);
833 AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
834
835 int rc = RTR0MemObjAllocCont(pMemObj, PAGE_SIZE, false /* fExecutable */);
836 if (RT_FAILURE(rc))
837 return rc;
838 *ppVirt = RTR0MemObjAddress(*pMemObj);
839 *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
840 ASMMemZero32(*ppVirt, PAGE_SIZE);
841 return VINF_SUCCESS;
842}
843
844
845/**
846 * Frees and unmaps an allocated physical page.
847 *
848 * @param pMemObj Pointer to the ring-0 memory object.
849 * @param ppVirt Where to re-initialize the virtual address of
850 * allocation as 0.
851 * @param pHCPhys Where to re-initialize the physical address of the
852 * allocation as 0.
853 */
854DECLINLINE(void) hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
855{
856 AssertPtr(pMemObj);
857 AssertPtr(ppVirt);
858 AssertPtr(pHCPhys);
859 if (*pMemObj != NIL_RTR0MEMOBJ)
860 {
861 int rc = RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
862 AssertRC(rc);
863 *pMemObj = NIL_RTR0MEMOBJ;
864 *ppVirt = 0;
865 *pHCPhys = 0;
866 }
867}
868
869
870/**
871 * Worker function to free VT-x related structures.
872 *
873 * @returns IPRT status code.
874 * @param pVM Pointer to the VM.
875 */
876static void hmR0VmxStructsFree(PVM pVM)
877{
878 for (VMCPUID i = 0; i < pVM->cCpus; i++)
879 {
880 PVMCPU pVCpu = &pVM->aCpus[i];
881 AssertPtr(pVCpu);
882
883 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
884 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
885
886 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
887 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
888
889 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic, &pVCpu->hm.s.vmx.HCPhysVirtApic);
890 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
891 }
892
893 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
894#ifdef VBOX_WITH_CRASHDUMP_MAGIC
895 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
896#endif
897}
898
899
900/**
901 * Worker function to allocate VT-x related VM structures.
902 *
903 * @returns IPRT status code.
904 * @param pVM Pointer to the VM.
905 */
906static int hmR0VmxStructsAlloc(PVM pVM)
907{
908 /*
909 * Initialize members up-front so we can cleanup properly on allocation failure.
910 */
911#define VMXLOCAL_INIT_VM_MEMOBJ(a_Name, a_VirtPrefix) \
912 pVM->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
913 pVM->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
914 pVM->hm.s.vmx.HCPhys##a_Name = 0;
915
916#define VMXLOCAL_INIT_VMCPU_MEMOBJ(a_Name, a_VirtPrefix) \
917 pVCpu->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
918 pVCpu->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
919 pVCpu->hm.s.vmx.HCPhys##a_Name = 0;
920
921#ifdef VBOX_WITH_CRASHDUMP_MAGIC
922 VMXLOCAL_INIT_VM_MEMOBJ(Scratch, pv);
923#endif
924 VMXLOCAL_INIT_VM_MEMOBJ(ApicAccess, pb);
925
926 AssertCompile(sizeof(VMCPUID) == sizeof(pVM->cCpus));
927 for (VMCPUID i = 0; i < pVM->cCpus; i++)
928 {
929 PVMCPU pVCpu = &pVM->aCpus[i];
930 VMXLOCAL_INIT_VMCPU_MEMOBJ(Vmcs, pv);
931 VMXLOCAL_INIT_VMCPU_MEMOBJ(VirtApic, pb);
932 VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
933 VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
934 VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
935 }
936#undef VMXLOCAL_INIT_VMCPU_MEMOBJ
937#undef VMXLOCAL_INIT_VM_MEMOBJ
938
939 /* The VMCS size cannot be more than 4096 bytes. See Intel spec. Appendix A.1 "Basic VMX Information". */
940 AssertReturnStmt(MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.Msrs.u64BasicInfo) <= PAGE_SIZE,
941 (&pVM->aCpus[0])->hm.s.u32HMError = VMX_UFC_INVALID_VMCS_SIZE,
942 VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO);
943
944 /*
945 * Allocate all the VT-x structures.
946 */
947 int rc = VINF_SUCCESS;
948#ifdef VBOX_WITH_CRASHDUMP_MAGIC
949 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
950 if (RT_FAILURE(rc))
951 goto cleanup;
952 strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
953 *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xdeadbeefdeadbeef);
954#endif
955
956 /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
957 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
958 {
959 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
960 &pVM->hm.s.vmx.HCPhysApicAccess);
961 if (RT_FAILURE(rc))
962 goto cleanup;
963 }
964
965 /*
966 * Initialize per-VCPU VT-x structures.
967 */
968 for (VMCPUID i = 0; i < pVM->cCpus; i++)
969 {
970 PVMCPU pVCpu = &pVM->aCpus[i];
971 AssertPtr(pVCpu);
972
973 /* Allocate the VM control structure (VMCS). */
974 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
975 if (RT_FAILURE(rc))
976 goto cleanup;
977
978 /* Allocate the Virtual-APIC page for transparent TPR accesses. */
979 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
980 {
981 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
982 &pVCpu->hm.s.vmx.HCPhysVirtApic);
983 if (RT_FAILURE(rc))
984 goto cleanup;
985 }
986
987 /*
988 * Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for
989 * transparent accesses of specific MSRs.
990 *
991 * If the condition for enabling MSR bitmaps changes here, don't forget to
992 * update HMIsMsrBitmapsAvailable().
993 */
994 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
995 {
996 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
997 &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
998 if (RT_FAILURE(rc))
999 goto cleanup;
1000 ASMMemFill32(pVCpu->hm.s.vmx.pvMsrBitmap, PAGE_SIZE, UINT32_C(0xffffffff));
1001 }
1002
1003 /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
1004 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
1005 if (RT_FAILURE(rc))
1006 goto cleanup;
1007
1008 /* Allocate the VM-exit MSR-load page for the host MSRs. */
1009 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
1010 if (RT_FAILURE(rc))
1011 goto cleanup;
1012 }
1013
1014 return VINF_SUCCESS;
1015
1016cleanup:
1017 hmR0VmxStructsFree(pVM);
1018 return rc;
1019}
1020
1021
1022/**
1023 * Does global VT-x initialization (called during module initialization).
1024 *
1025 * @returns VBox status code.
1026 */
1027VMMR0DECL(int) VMXR0GlobalInit(void)
1028{
1029#ifdef HMVMX_USE_FUNCTION_TABLE
1030 AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers));
1031# ifdef VBOX_STRICT
1032 for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++)
1033 Assert(g_apfnVMExitHandlers[i]);
1034# endif
1035#endif
1036 return VINF_SUCCESS;
1037}
1038
1039
1040/**
1041 * Does global VT-x termination (called during module termination).
1042 */
1043VMMR0DECL(void) VMXR0GlobalTerm()
1044{
1045 /* Nothing to do currently. */
1046}
1047
1048
1049/**
1050 * Sets up and activates VT-x on the current CPU.
1051 *
1052 * @returns VBox status code.
1053 * @param pCpu Pointer to the global CPU info struct.
1054 * @param pVM Pointer to the VM (can be NULL after a host resume
1055 * operation).
1056 * @param pvCpuPage Pointer to the VMXON region (can be NULL if @a
1057 * fEnabledByHost is true).
1058 * @param HCPhysCpuPage Physical address of the VMXON region (can be 0 if
1059 * @a fEnabledByHost is true).
1060 * @param fEnabledByHost Set if SUPR0EnableVTx() or similar was used to
1061 * enable VT-x on the host.
1062 * @param pvMsrs Opaque pointer to VMXMSRS struct.
1063 */
1064VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
1065 void *pvMsrs)
1066{
1067 Assert(pCpu);
1068 Assert(pvMsrs);
1069 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1070
1071 /* Enable VT-x if it's not already enabled by the host. */
1072 if (!fEnabledByHost)
1073 {
1074 int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
1075 if (RT_FAILURE(rc))
1076 return rc;
1077 }
1078
1079 /*
1080 * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor has been using EPTPs) so
1081 * we don't retain any stale guest-physical mappings which won't get invalidated when flushing by VPID.
1082 */
1083 PVMXMSRS pMsrs = (PVMXMSRS)pvMsrs;
1084 if (pMsrs->u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
1085 {
1086 hmR0VmxFlushEpt(NULL /* pVCpu */, VMXFLUSHEPT_ALL_CONTEXTS);
1087 pCpu->fFlushAsidBeforeUse = false;
1088 }
1089 else
1090 pCpu->fFlushAsidBeforeUse = true;
1091
1092 /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
1093 ++pCpu->cTlbFlushes;
1094
1095 return VINF_SUCCESS;
1096}
1097
1098
1099/**
1100 * Deactivates VT-x on the current CPU.
1101 *
1102 * @returns VBox status code.
1103 * @param pCpu Pointer to the global CPU info struct.
1104 * @param pvCpuPage Pointer to the VMXON region.
1105 * @param HCPhysCpuPage Physical address of the VMXON region.
1106 *
1107 * @remarks This function should never be called when SUPR0EnableVTx() or
1108 * similar was used to enable VT-x on the host.
1109 */
1110VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
1111{
1112 NOREF(pCpu);
1113 NOREF(pvCpuPage);
1114 NOREF(HCPhysCpuPage);
1115
1116 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1117 return hmR0VmxLeaveRootMode();
1118}
1119
1120
1121/**
1122 * Sets the permission bits for the specified MSR in the MSR bitmap.
1123 *
1124 * @param pVCpu Pointer to the VMCPU.
1125 * @param uMsr The MSR value.
1126 * @param enmRead Whether reading this MSR causes a VM-exit.
1127 * @param enmWrite Whether writing this MSR causes a VM-exit.
1128 */
1129static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
1130{
1131 int32_t iBit;
1132 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
1133
1134 /*
1135 * Layout:
1136 * 0x000 - 0x3ff - Low MSR read bits
1137 * 0x400 - 0x7ff - High MSR read bits
1138 * 0x800 - 0xbff - Low MSR write bits
1139 * 0xc00 - 0xfff - High MSR write bits
1140 */
1141 if (uMsr <= 0x00001FFF)
1142 iBit = uMsr;
1143 else if ( uMsr >= 0xC0000000
1144 && uMsr <= 0xC0001FFF)
1145 {
1146 iBit = (uMsr - 0xC0000000);
1147 pbMsrBitmap += 0x400;
1148 }
1149 else
1150 {
1151 AssertMsgFailed(("hmR0VmxSetMsrPermission: Invalid MSR %#RX32\n", uMsr));
1152 return;
1153 }
1154
1155 Assert(iBit <= 0x1fff);
1156 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
1157 ASMBitSet(pbMsrBitmap, iBit);
1158 else
1159 ASMBitClear(pbMsrBitmap, iBit);
1160
1161 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
1162 ASMBitSet(pbMsrBitmap + 0x800, iBit);
1163 else
1164 ASMBitClear(pbMsrBitmap + 0x800, iBit);
1165}
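/*
 * Worked example of the bitmap layout above (illustrative only): for a low MSR such
 * as IA32_SYSENTER_CS (0x174), iBit is 0x174, the read bit lives in the 0x000-0x3ff
 * region and the matching write bit at +0x800. For a high MSR such as MSR_K8_LSTAR
 * (0xC0000082), iBit is 0x82 and pbMsrBitmap is advanced by 0x400, so the read bit
 * lands in the 0x400-0x7ff region and the write bit in the 0xc00-0xfff region.
 * A typical call to make an MSR fully pass-through might look like:
 *
 *     hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
 */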
1166
1167
1168#ifdef VBOX_STRICT
1169/**
1170 * Gets the permission bits for the specified MSR in the MSR bitmap.
1171 *
1172 * @returns VBox status code.
1173 * @retval VINF_SUCCESS if the specified MSR is found.
1174 * @retval VERR_NOT_FOUND if the specified MSR is not found.
1175 * @retval VERR_NOT_SUPPORTED if VT-x doesn't allow the MSR.
1176 *
1177 * @param pVCpu Pointer to the VMCPU.
1178 * @param uMsr The MSR.
1179 * @param penmRead Where to store the read permissions.
1180 * @param penmWrite Where to store the write permissions.
1181 */
1182static int hmR0VmxGetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, PVMXMSREXITREAD penmRead, PVMXMSREXITWRITE penmWrite)
1183{
1184 AssertPtrReturn(penmRead, VERR_INVALID_PARAMETER);
1185 AssertPtrReturn(penmWrite, VERR_INVALID_PARAMETER);
1186 int32_t iBit;
1187 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
1188
1189 /* See hmR0VmxSetMsrPermission() for the layout. */
1190 if (uMsr <= 0x00001FFF)
1191 iBit = uMsr;
1192 else if ( uMsr >= 0xC0000000
1193 && uMsr <= 0xC0001FFF)
1194 {
1195 iBit = (uMsr - 0xC0000000);
1196 pbMsrBitmap += 0x400;
1197 }
1198 else
1199 {
1200 AssertMsgFailed(("hmR0VmxGetMsrPermission: Invalid MSR %#RX32\n", uMsr));
1201 return VERR_NOT_SUPPORTED;
1202 }
1203
1204 Assert(iBit <= 0x1fff);
1205 if (ASMBitTest(pbMsrBitmap, iBit))
1206 *penmRead = VMXMSREXIT_INTERCEPT_READ;
1207 else
1208 *penmRead = VMXMSREXIT_PASSTHRU_READ;
1209
1210 if (ASMBitTest(pbMsrBitmap + 0x800, iBit))
1211 *penmWrite = VMXMSREXIT_INTERCEPT_WRITE;
1212 else
1213 *penmWrite = VMXMSREXIT_PASSTHRU_WRITE;
1214 return VINF_SUCCESS;
1215}
1216#endif /* VBOX_STRICT */
1217
1218
1219/**
1220 * Updates the VMCS with the number of effective MSRs in the auto-load/store MSR
1221 * area.
1222 *
1223 * @returns VBox status code.
1224 * @param pVCpu Pointer to the VMCPU.
1225 * @param cMsrs The number of MSRs.
1226 */
1227DECLINLINE(int) hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, uint32_t cMsrs)
1228{
1229 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
1230 uint32_t const cMaxSupportedMsrs = MSR_IA32_VMX_MISC_MAX_MSR(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc);
1231 if (RT_UNLIKELY(cMsrs > cMaxSupportedMsrs))
1232 {
1233 LogRel(("CPU auto-load/store MSR count in VMCS exceeded cMsrs=%u Supported=%u.\n", cMsrs, cMaxSupportedMsrs));
1234 pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
1235 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1236 }
1237
1238 /* Update number of guest MSRs to load/store across the world-switch. */
1239 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs); AssertRCReturn(rc, rc);
1240 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs); AssertRCReturn(rc, rc);
1241
1242 /* Update number of host MSRs to load after the world-switch. Identical to guest-MSR count as it's always paired. */
1243 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cMsrs); AssertRCReturn(rc, rc);
1244
1245 /* Update the VCPU's copy of the MSR count. */
1246 pVCpu->hm.s.vmx.cMsrs = cMsrs;
1247
1248 return VINF_SUCCESS;
1249}
1250
1251
1252/**
1253 * Adds a new (or updates the value of an existing) guest/host MSR
1254 * pair to be swapped during the world-switch as part of the
1255 * auto-load/store MSR area in the VMCS.
1256 *
1257 * @returns true if the MSR was added -and- its value was updated, false
1258 * otherwise.
1259 * @param pVCpu Pointer to the VMCPU.
1260 * @param uMsr The MSR.
1261 * @param uGuestMsrValue Value of the guest MSR.
1262 * @param fUpdateHostMsr Whether to update the value of the host MSR if
1263 * necessary.
1264 */
1265static bool hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr, uint64_t uGuestMsrValue, bool fUpdateHostMsr)
1266{
1267 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1268 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1269 uint32_t i;
1270 for (i = 0; i < cMsrs; i++)
1271 {
1272 if (pGuestMsr->u32Msr == uMsr)
1273 break;
1274 pGuestMsr++;
1275 }
1276
1277 bool fAdded = false;
1278 if (i == cMsrs)
1279 {
1280 ++cMsrs;
1281 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
1282 AssertRC(rc);
1283
1284 /* Now that we're swapping MSRs during the world-switch, allow the guest to read/write them without causing VM-exits. */
1285 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1286 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1287
1288 fAdded = true;
1289 }
1290
1291 /* Update the MSR values in the auto-load/store MSR area. */
1292 pGuestMsr->u32Msr = uMsr;
1293 pGuestMsr->u64Value = uGuestMsrValue;
1294
1295 /* Create/update the MSR slot in the host MSR area. */
1296 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1297 pHostMsr += i;
1298 pHostMsr->u32Msr = uMsr;
1299
1300 /*
1301 * Update the host MSR only when requested by the caller AND when we're
1302 * adding it to the auto-load/store area. Otherwise, it would have been
1303 * updated by hmR0VmxSaveHostMsrs(). We do this for performance reasons.
1304 */
1305 bool fUpdatedMsrValue = false;
1306 if ( fAdded
1307 && fUpdateHostMsr)
1308 {
1309 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1310 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1311 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
1312 fUpdatedMsrValue = true;
1313 }
1314
1315 return fUpdatedMsrValue;
1316}
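/*
 * A hedged usage sketch: adding a guest/host MSR pair to the auto-load/store area,
 * e.g. TSC_AUX when RDTSCP is exposed to the guest. The guest value uGuestTscAux is
 * a placeholder; actual callers obtain it from CPUM:
 *
 *     hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, uGuestTscAux, true);  // true = also refresh the host value now
 */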
1317
1318
1319/**
1320 * Removes a guest/host MSR pair to be swapped during the world-switch from the
1321 * auto-load/store MSR area in the VMCS.
1322 *
1323 * @returns VBox status code.
1324 * @param pVCpu Pointer to the VMCPU.
1325 * @param uMsr The MSR.
1326 */
1327static int hmR0VmxRemoveAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr)
1328{
1329 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1330 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1331 for (uint32_t i = 0; i < cMsrs; i++)
1332 {
1333 /* Find the MSR. */
1334 if (pGuestMsr->u32Msr == uMsr)
1335 {
1336 /* If it's the last MSR, simply reduce the count. */
1337 if (i == cMsrs - 1)
1338 {
1339 --cMsrs;
1340 break;
1341 }
1342
1343 /* Remove it by swapping the last MSR in place of it, and reducing the count. */
1344 PVMXAUTOMSR pLastGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1345 pLastGuestMsr += cMsrs - 1;
1346 pGuestMsr->u32Msr = pLastGuestMsr->u32Msr;
1347 pGuestMsr->u64Value = pLastGuestMsr->u64Value;
1348
1349 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1350 PVMXAUTOMSR pLastHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1351 pLastHostMsr += cMsrs - 1;
1352 pHostMsr->u32Msr = pLastHostMsr->u32Msr;
1353 pHostMsr->u64Value = pLastHostMsr->u64Value;
1354 --cMsrs;
1355 break;
1356 }
1357 pGuestMsr++;
1358 }
1359
1360 /* Update the VMCS if the count changed (meaning the MSR was found). */
1361 if (cMsrs != pVCpu->hm.s.vmx.cMsrs)
1362 {
1363 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
1364 AssertRCReturn(rc, rc);
1365
1366 /* We're no longer swapping MSRs during the world-switch, intercept guest read/writes to them. */
1367 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1368 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
1369
1370 Log4(("Removed MSR %#RX32 new cMsrs=%u\n", uMsr, pVCpu->hm.s.vmx.cMsrs));
1371 return VINF_SUCCESS;
1372 }
1373
1374 return VERR_NOT_FOUND;
1375}
1376
1377
1378/**
1379 * Checks if the specified guest MSR is part of the auto-load/store area in
1380 * the VMCS.
1381 *
1382 * @returns true if found, false otherwise.
1383 * @param pVCpu Pointer to the VMCPU.
1384 * @param uMsr The MSR to find.
1385 */
1386static bool hmR0VmxIsAutoLoadStoreGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
1387{
1388 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1389 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1390
1391 for (uint32_t i = 0; i < cMsrs; i++, pGuestMsr++)
1392 {
1393 if (pGuestMsr->u32Msr == uMsr)
1394 return true;
1395 }
1396 return false;
1397}
1398
1399
1400/**
1401 * Updates the value of all host MSRs in the auto-load/store area in the VMCS.
1402 *
1403 * @param pVCpu Pointer to the VMCPU.
1404 *
1405 * @remarks No-long-jump zone!!!
1406 */
1407static void hmR0VmxUpdateAutoLoadStoreHostMsrs(PVMCPU pVCpu)
1408{
1409 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1410 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1411 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1412 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1413
1414 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
1415 {
1416 AssertReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr);
1417
1418 /*
1419 * Performance hack for the host EFER MSR. We use the cached value rather than re-read it.
1420 * Strict builds will catch mismatches in hmR0VmxCheckAutoLoadStoreMsrs(). See @bugref{7368}.
1421 */
1422 if (pHostMsr->u32Msr == MSR_K6_EFER)
1423 pHostMsr->u64Value = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostEfer;
1424 else
1425 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
1426 }
1427
1428 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
1429}
1430
1431
1432#if HC_ARCH_BITS == 64
1433/**
1434 * Saves a set of host MSRs to allow read/write passthru access to the guest and
1435 * perform lazy restoration of the host MSRs while leaving VT-x.
1436 *
1437 * @param pVCpu Pointer to the VMCPU.
1438 *
1439 * @remarks No-long-jump zone!!!
1440 */
1441static void hmR0VmxLazySaveHostMsrs(PVMCPU pVCpu)
1442{
1443 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1444
1445 /*
1446 * Note: If you're adding MSRs here, make sure to update the MSR-bitmap permissions in hmR0VmxSetupProcCtls().
1447 */
1448 if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
1449 {
1450 pVCpu->hm.s.vmx.u64HostLStarMsr = ASMRdMsr(MSR_K8_LSTAR);
1451 pVCpu->hm.s.vmx.u64HostStarMsr = ASMRdMsr(MSR_K6_STAR);
1452 pVCpu->hm.s.vmx.u64HostSFMaskMsr = ASMRdMsr(MSR_K8_SF_MASK);
1453 pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
1454 pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_SAVED_HOST;
1455 }
1456}
1457
1458
1459/**
1460 * Checks whether the MSR belongs to the set of guest MSRs that we restore
1461 * lazily while leaving VT-x.
1462 *
1463 * @returns true if it does, false otherwise.
1464 * @param pVCpu Pointer to the VMCPU.
1465 * @param uMsr The MSR to check.
1466 */
1467static bool hmR0VmxIsLazyGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
1468{
1469 NOREF(pVCpu);
1470 switch (uMsr)
1471 {
1472 case MSR_K8_LSTAR:
1473 case MSR_K6_STAR:
1474 case MSR_K8_SF_MASK:
1475 case MSR_K8_KERNEL_GS_BASE:
1476 return true;
1477 }
1478 return false;
1479}
1480
1481
1482/**
1483 * Saves a set of guest MSRs back into the guest-CPU context.
1484 *
1485 * @param pVCpu Pointer to the VMCPU.
1486 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
1487 * out-of-sync. Make sure to update the required fields
1488 * before using them.
1489 *
1490 * @remarks No-long-jump zone!!!
1491 */
1492static void hmR0VmxLazySaveGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
1493{
1494 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1495 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1496
1497 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
1498 {
1499 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1500 pMixedCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
1501 pMixedCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
1502 pMixedCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
1503 pMixedCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
1504 }
1505}
1506
1507
1508/**
1509 * Loads a set of guest MSRs to allow read/write passthru access to the guest.
1510 *
1511 * The name of this function is slightly confusing. This function does NOT
1512 * postpone loading, but loads the MSR right now. "hmR0VmxLazy" is simply a
1513 * common prefix for functions dealing with "lazy restoration" of the shared
1514 * MSRs.
1515 *
1516 * @param pVCpu Pointer to the VMCPU.
1517 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
1518 * out-of-sync. Make sure to update the required fields
1519 * before using them.
1520 *
1521 * @remarks No-long-jump zone!!!
1522 */
1523static void hmR0VmxLazyLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
1524{
1525 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1526 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1527
1528#define VMXLOCAL_LAZY_LOAD_GUEST_MSR(uMsr, a_GuestMsr, a_HostMsr) \
1529 do { \
1530 if (pMixedCtx->msr##a_GuestMsr != pVCpu->hm.s.vmx.u64Host##a_HostMsr##Msr) \
1531 ASMWrMsr(uMsr, pMixedCtx->msr##a_GuestMsr); \
1532 else \
1533 Assert(ASMRdMsr(uMsr) == pVCpu->hm.s.vmx.u64Host##a_HostMsr##Msr); \
1534 } while (0)
1535
1536 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1537 if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
1538 {
1539 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_LSTAR, LSTAR, LStar);
1540 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K6_STAR, STAR, Star);
1541 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_SF_MASK, SFMASK, SFMask);
1542 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_KERNEL_GS_BASE, KERNELGSBASE, KernelGSBase);
1543 pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_LOADED_GUEST;
1544 }
1545 else
1546 {
1547 ASMWrMsr(MSR_K8_LSTAR, pMixedCtx->msrLSTAR);
1548 ASMWrMsr(MSR_K6_STAR, pMixedCtx->msrSTAR);
1549 ASMWrMsr(MSR_K8_SF_MASK, pMixedCtx->msrSFMASK);
1550 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE);
1551 }
1552
1553#undef VMXLOCAL_LAZY_LOAD_GUEST_MSR
1554}
1555
1556
1557/**
1558 * Performs lazy restoration of the set of host MSRs if they were previously
1559 * loaded with guest MSR values.
1560 *
1561 * @param pVCpu Pointer to the VMCPU.
1562 *
1563 * @remarks No-long-jump zone!!!
1564 * @remarks The guest MSRs should have been saved back into the guest-CPU
1565 * context by hmR0VmxSaveGuestLazyMsrs()!!!
1566 */
1567static void hmR0VmxLazyRestoreHostMsrs(PVMCPU pVCpu)
1568{
1569 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1570 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1571
1572 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
1573 {
1574 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1575 ASMWrMsr(MSR_K8_LSTAR, pVCpu->hm.s.vmx.u64HostLStarMsr);
1576 ASMWrMsr(MSR_K6_STAR, pVCpu->hm.s.vmx.u64HostStarMsr);
1577 ASMWrMsr(MSR_K8_SF_MASK, pVCpu->hm.s.vmx.u64HostSFMaskMsr);
1578 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr);
1579 }
1580 pVCpu->hm.s.vmx.fLazyMsrs &= ~(VMX_LAZY_MSRS_LOADED_GUEST | VMX_LAZY_MSRS_SAVED_HOST);
1581}
1582#endif /* HC_ARCH_BITS == 64 */
1583
1584
1585/**
1586 * Verifies that our cached values of the VMCS controls are all
1587 * consistent with what's actually present in the VMCS.
1588 *
1589 * @returns VBox status code.
1590 * @param pVCpu Pointer to the VMCPU.
1591 */
1592static int hmR0VmxCheckVmcsCtls(PVMCPU pVCpu)
1593{
1594 uint32_t u32Val;
1595 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
1596 AssertRCReturn(rc, rc);
1597 AssertMsgReturn(pVCpu->hm.s.vmx.u32EntryCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32EntryCtls, u32Val),
1598 VERR_VMX_ENTRY_CTLS_CACHE_INVALID);
1599
1600 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val);
1601 AssertRCReturn(rc, rc);
1602 AssertMsgReturn(pVCpu->hm.s.vmx.u32ExitCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ExitCtls, u32Val),
1603 VERR_VMX_EXIT_CTLS_CACHE_INVALID);
1604
1605 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1606 AssertRCReturn(rc, rc);
1607 AssertMsgReturn(pVCpu->hm.s.vmx.u32PinCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32PinCtls, u32Val),
1608 VERR_VMX_PIN_EXEC_CTLS_CACHE_INVALID);
1609
1610 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1611 AssertRCReturn(rc, rc);
1612 AssertMsgReturn(pVCpu->hm.s.vmx.u32ProcCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ProcCtls, u32Val),
1613 VERR_VMX_PROC_EXEC_CTLS_CACHE_INVALID);
1614
1615 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1616 {
1617 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1618 AssertRCReturn(rc, rc);
1619 AssertMsgReturn(pVCpu->hm.s.vmx.u32ProcCtls2 == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ProcCtls2, u32Val),
1620 VERR_VMX_PROC_EXEC2_CTLS_CACHE_INVALID);
1621 }
1622
1623 return VINF_SUCCESS;
1624}
1625
1626
1627#ifdef VBOX_STRICT
1628/**
1629 * Verifies that our cached host EFER value has not changed
1630 * since we cached it.
1631 *
1632 * @param pVCpu Pointer to the VMCPU.
1633 */
1634static void hmR0VmxCheckHostEferMsr(PVMCPU pVCpu)
1635{
1636 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1637
1638 if (pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
1639 {
1640 uint64_t u64Val;
1641 int rc = VMXReadVmcs64(VMX_VMCS64_HOST_FIELD_EFER_FULL, &u64Val);
1642 AssertRC(rc);
1643
1644 uint64_t u64HostEferMsr = ASMRdMsr(MSR_K6_EFER);
1645 AssertMsgReturnVoid(u64HostEferMsr == u64Val, ("u64HostEferMsr=%#RX64 u64Val=%#RX64\n", u64HostEferMsr, u64Val));
1646 }
1647}
1648
1649
1650/**
1651 * Verifies whether the guest/host MSR pairs in the auto-load/store area in the
1652 * VMCS are correct.
1653 *
1654 * @param pVCpu Pointer to the VMCPU.
1655 */
1656static void hmR0VmxCheckAutoLoadStoreMsrs(PVMCPU pVCpu)
1657{
1658 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1659
1660    /* Verify MSR counts in the VMCS are what we think they should be. */
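    /* Note: all three counts are expected to be identical. The guest-MSR area doubles as both the VM-entry
       MSR-load and VM-exit MSR-store area (see hmR0VmxSetupMiscCtls()), and the host (VM-exit MSR-load)
       area count is presumably kept in sync by the code that adds/removes auto-load/store MSRs. */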
1661 uint32_t cMsrs;
1662 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cMsrs); AssertRC(rc);
1663 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1664
1665 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cMsrs); AssertRC(rc);
1666 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1667
1668 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &cMsrs); AssertRC(rc);
1669 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1670
1671 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1672 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1673 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
1674 {
1675 /* Verify that the MSRs are paired properly and that the host MSR has the correct value. */
1676 AssertMsgReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr, ("HostMsr=%#RX32 GuestMsr=%#RX32 cMsrs=%u\n", pHostMsr->u32Msr,
1677 pGuestMsr->u32Msr, cMsrs));
1678
1679 uint64_t u64Msr = ASMRdMsr(pHostMsr->u32Msr);
1680 AssertMsgReturnVoid(pHostMsr->u64Value == u64Msr, ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n",
1681 pHostMsr->u32Msr, pHostMsr->u64Value, u64Msr, cMsrs));
1682
1683 /* Verify that the permissions are as expected in the MSR bitmap. */
1684 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1685 {
1686 VMXMSREXITREAD enmRead;
1687 VMXMSREXITWRITE enmWrite;
1688 rc = hmR0VmxGetMsrPermission(pVCpu, pGuestMsr->u32Msr, &enmRead, &enmWrite);
1689            AssertMsgReturnVoid(rc == VINF_SUCCESS, ("hmR0VmxGetMsrPermission failed! rc=%Rrc\n", rc));
1690 if (pGuestMsr->u32Msr == MSR_K6_EFER)
1691 {
1692 AssertMsgReturnVoid(enmRead == VMXMSREXIT_INTERCEPT_READ, ("Passthru read for EFER!?\n"));
1693 AssertMsgReturnVoid(enmWrite == VMXMSREXIT_INTERCEPT_WRITE, ("Passthru write for EFER!?\n"));
1694 }
1695 else
1696 {
1697 AssertMsgReturnVoid(enmRead == VMXMSREXIT_PASSTHRU_READ, ("u32Msr=%#RX32 cMsrs=%u No passthru read!\n",
1698 pGuestMsr->u32Msr, cMsrs));
1699 AssertMsgReturnVoid(enmWrite == VMXMSREXIT_PASSTHRU_WRITE, ("u32Msr=%#RX32 cMsrs=%u No passthru write!\n",
1700 pGuestMsr->u32Msr, cMsrs));
1701 }
1702 }
1703 }
1704}
1705#endif /* VBOX_STRICT */
1706
1707
1708/**
1709 * Flushes the TLB using EPT.
1710 *
1711 * @returns VBox status code.
1712 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
1713 * enmFlush).
1714 * @param enmFlush Type of flush.
1715 *
1716 * @remarks Caller is responsible for making sure this function is called only
1717 * when NestedPaging is supported and providing @a enmFlush that is
1718 * supported by the CPU.
1719 * @remarks Can be called with interrupts disabled.
1720 */
1721static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXFLUSHEPT enmFlush)
1722{
1723 uint64_t au64Descriptor[2];
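    /* INVEPT descriptor layout: the first quadword holds the EPTP identifying the context to flush (ignored
       for all-context flushes), the second quadword is reserved and must be zero. */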
1724 if (enmFlush == VMXFLUSHEPT_ALL_CONTEXTS)
1725 au64Descriptor[0] = 0;
1726 else
1727 {
1728 Assert(pVCpu);
1729 au64Descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP;
1730 }
1731 au64Descriptor[1] = 0; /* MBZ. Intel spec. 33.3 "VMX Instructions" */
1732
1733 int rc = VMXR0InvEPT(enmFlush, &au64Descriptor[0]);
1734    AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %RHp failed with %Rrc\n", enmFlush, pVCpu ? pVCpu->hm.s.vmx.HCPhysEPTP : 0,
1735              rc));
1736 if ( RT_SUCCESS(rc)
1737 && pVCpu)
1738 {
1739 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
1740 }
1741}
1742
1743
1744/**
1745 * Flushes the TLB using VPID.
1746 *
1747 * @returns VBox status code.
1748 * @param pVM Pointer to the VM.
1749 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
1750 * enmFlush).
1751 * @param enmFlush Type of flush.
1752 * @param GCPtr Virtual address of the page to flush (can be 0 depending
1753 * on @a enmFlush).
1754 *
1755 * @remarks Can be called with interrupts disabled.
1756 */
1757static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr)
1758{
1759 NOREF(pVM);
1760 AssertPtr(pVM);
1761 Assert(pVM->hm.s.vmx.fVpid);
1762
1763 uint64_t au64Descriptor[2];
1764 if (enmFlush == VMXFLUSHVPID_ALL_CONTEXTS)
1765 {
1766 au64Descriptor[0] = 0;
1767 au64Descriptor[1] = 0;
1768 }
1769 else
1770 {
1771 AssertPtr(pVCpu);
1772 AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1773 AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1774 au64Descriptor[0] = pVCpu->hm.s.uCurrentAsid;
1775 au64Descriptor[1] = GCPtr;
1776 }
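    /* INVVPID descriptor layout: bits 15:0 of the first quadword hold the VPID (the remaining bits must be
       zero, hence the UINT16_MAX assertion above), the second quadword holds the linear address and is only
       consulted for individual-address flushes. */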
1777
1778 int rc = VMXR0InvVPID(enmFlush, &au64Descriptor[0]); NOREF(rc);
1779 AssertMsg(rc == VINF_SUCCESS,
1780 ("VMXR0InvVPID %#x %u %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
1781 if ( RT_SUCCESS(rc)
1782 && pVCpu)
1783 {
1784 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
1785 }
1786}
1787
1788
1789/**
1790 * Invalidates a guest page by guest virtual address. Only relevant for
1791 * EPT/VPID, otherwise there is nothing really to invalidate.
1792 *
1793 * @returns VBox status code.
1794 * @param pVM Pointer to the VM.
1795 * @param pVCpu Pointer to the VMCPU.
1796 * @param GCVirt Guest virtual address of the page to invalidate.
1797 */
1798VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
1799{
1800 AssertPtr(pVM);
1801 AssertPtr(pVCpu);
1802 LogFlowFunc(("pVM=%p pVCpu=%p GCVirt=%RGv\n", pVM, pVCpu, GCVirt));
1803
1804 bool fFlushPending = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
1805 if (!fFlushPending)
1806 {
1807 /*
1808         * We must invalidate the guest TLB entry in either case; we cannot ignore it even for the EPT case.
1809         * See @bugref{6043} and @bugref{6177}.
1810         *
1811         * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*() as this
1812         * function may be called in a loop with individual addresses.
1813 */
1814 if (pVM->hm.s.vmx.fVpid)
1815 {
1816 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1817 {
1818 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, GCVirt);
1819 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
1820 }
1821 else
1822 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1823 }
1824 else if (pVM->hm.s.fNestedPaging)
1825 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1826 }
1827
1828 return VINF_SUCCESS;
1829}
1830
1831
1832/**
1833 * Invalidates a guest page by physical address. Only relevant for EPT/VPID,
1834 * otherwise there is nothing really to invalidate.
1835 *
1836 * @returns VBox status code.
1837 * @param pVM Pointer to the VM.
1838 * @param pVCpu Pointer to the VMCPU.
1839 * @param GCPhys Guest physical address of the page to invalidate.
1840 */
1841VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
1842{
1843 NOREF(pVM); NOREF(GCPhys);
1844 LogFlowFunc(("%RGp\n", GCPhys));
1845
1846 /*
1847     * We cannot flush a page by guest-physical address: INVVPID takes only a linear address, while INVEPT flushes only
1848     * by EPT context and not by individual addresses. We set the force flag here and flush before the next VM-entry in
1849     * hmR0VmxFlushTLB*(), as this function might be called in a loop. This causes a flush-by-EPT if EPT is in use. See @bugref{6568}.
1850 */
1851 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1852 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys);
1853 return VINF_SUCCESS;
1854}
1855
1856
1857/**
1858 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
1859 * case where neither EPT nor VPID is supported by the CPU.
1860 *
1861 * @param pVM Pointer to the VM.
1862 * @param pVCpu Pointer to the VMCPU.
1863 * @param pCpu Pointer to the global HM struct.
1864 *
1865 * @remarks Called with interrupts disabled.
1866 */
1867static void hmR0VmxFlushTaggedTlbNone(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
1868{
1869 AssertPtr(pVCpu);
1870 AssertPtr(pCpu);
1871 NOREF(pVM);
1872
1873 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
1874
1875 /** @todo TLB shootdown is currently not used. See hmQueueInvlPage(). */
1876#if 0
1877 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1878 pVCpu->hm.s.TlbShootdown.cPages = 0;
1879#endif
1880
1881 Assert(pCpu->idCpu != NIL_RTCPUID);
1882 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1883 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1884 pVCpu->hm.s.fForceTLBFlush = false;
1885 return;
1886}
1887
1888
1889/**
1890 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
1891 *
1892 * @param pVM Pointer to the VM.
1893 * @param pVCpu Pointer to the VMCPU.
1894 * @param pCpu Pointer to the global HM CPU struct.
1895 * @remarks All references to "ASID" in this function pertain to "VPID" in
1896 *          Intel's nomenclature. The reason is to avoid confusion in comparison
1897 *          statements, since the host-CPU copies are named "ASID".
1898 *
1899 * @remarks Called with interrupts disabled.
1900 */
1901static void hmR0VmxFlushTaggedTlbBoth(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
1902{
1903#ifdef VBOX_WITH_STATISTICS
1904 bool fTlbFlushed = false;
1905# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { fTlbFlushed = true; } while (0)
1906# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { \
1907 if (!fTlbFlushed) \
1908 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch); \
1909 } while (0)
1910#else
1911# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { } while (0)
1912# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { } while (0)
1913#endif
1914
1915 AssertPtr(pVM);
1916 AssertPtr(pCpu);
1917 AssertPtr(pVCpu);
1918 Assert(pCpu->idCpu != NIL_RTCPUID);
1919
1920 AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
1921 ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
1922              " fNestedPaging=%RTbool fVpid=%RTbool", pVM->hm.s.fNestedPaging, pVM->hm.s.vmx.fVpid));
1923
1924 /*
1925 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
1926 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
1927 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
1928 */
1929 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1930 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1931 {
1932 ++pCpu->uCurrentAsid;
1933 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1934 {
1935 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0. */
1936 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
1937 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
1938 }
1939
1940 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1941 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1942 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1943
1944 /*
1945 * Flush by EPT when we get rescheduled to a new host CPU to ensure EPT-only tagged mappings are also
1946 * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}.
1947 */
1948 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1949 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1950 HMVMX_SET_TAGGED_TLB_FLUSHED();
1951 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH); /* Already flushed-by-EPT, skip doing it again below. */
1952 }
1953
1954 /* Check for explicit TLB shootdowns. */
1955 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1956 {
1957 /*
1958         * Changes to the EPT paging structure by the VMM require flushing by EPT as the CPU creates
1959 * guest-physical (only EPT-tagged) mappings while traversing the EPT tables when EPT is in use.
1960 * Flushing by VPID will only flush linear (only VPID-tagged) and combined (EPT+VPID tagged) mappings
1961 * but not guest-physical mappings.
1962 * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information". See @bugref{6568}.
1963 */
1964 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1965 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1966 HMVMX_SET_TAGGED_TLB_FLUSHED();
1967 }
1968
1969 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere. See hmQueueInvlPage()
1970 * where it is commented out. Support individual entry flushing
1971 * someday. */
1972#if 0
1973 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1974 {
1975 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
1976
1977 /*
1978 * Flush individual guest entries using VPID from the TLB or as little as possible with EPT
1979 * as supported by the CPU.
1980 */
1981 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1982 {
1983 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
1984 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
1985 }
1986 else
1987 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1988
1989 HMVMX_SET_TAGGED_TLB_FLUSHED();
1990 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1991 pVCpu->hm.s.TlbShootdown.cPages = 0;
1992 }
1993#endif
1994
1995 pVCpu->hm.s.fForceTLBFlush = false;
1996
1997 HMVMX_UPDATE_FLUSH_SKIPPED_STAT();
1998
1999 Assert(pVCpu->hm.s.idLastCpu == pCpu->idCpu);
2000 Assert(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes);
2001 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
2002 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
2003 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
2004 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pCpu->idCpu,
2005 pCpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
2006 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2007 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2008
2009 /* Update VMCS with the VPID. */
2010 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
2011 AssertRC(rc);
2012
2013#undef HMVMX_SET_TAGGED_TLB_FLUSHED
2014}
2015
2016
2017/**
2018 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
2019 *
2020 * @returns VBox status code.
2021 * @param pVM Pointer to the VM.
2022 * @param pVCpu Pointer to the VMCPU.
2023 * @param pCpu Pointer to the global HM CPU struct.
2024 *
2025 * @remarks Called with interrupts disabled.
2026 */
2027static void hmR0VmxFlushTaggedTlbEpt(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2028{
2029 AssertPtr(pVM);
2030 AssertPtr(pVCpu);
2031 AssertPtr(pCpu);
2032 Assert(pCpu->idCpu != NIL_RTCPUID);
2033 AssertMsg(pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with NestedPaging disabled."));
2034 AssertMsg(!pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID enabled."));
2035
2036 /*
2037 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
2038 * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
2039 */
2040 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
2041 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
2042 {
2043 pVCpu->hm.s.fForceTLBFlush = true;
2044 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2045 }
2046
2047 /* Check for explicit TLB shootdown flushes. */
2048 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2049 {
2050 pVCpu->hm.s.fForceTLBFlush = true;
2051 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2052 }
2053
2054 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
2055 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
2056
2057 if (pVCpu->hm.s.fForceTLBFlush)
2058 {
2059 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
2060 pVCpu->hm.s.fForceTLBFlush = false;
2061 }
2062 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere. See hmQueueInvlPage()
2063 * where it is commented out. Support individual entry flushing
2064 * someday. */
2065#if 0
2066 else
2067 {
2068 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
2069 {
2070 /* We cannot flush individual entries without VPID support. Flush using EPT. */
2071 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
2072 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
2073 }
2074 else
2075 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
2076
2077 pVCpu->hm.s.TlbShootdown.cPages = 0;
2078 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
2079 }
2080#endif
2081}
2082
2083
2084/**
2085 * Flushes the tagged-TLB entries for VPID CPUs as necessary.
2086 *
2087 * @returns VBox status code.
2088 * @param pVM Pointer to the VM.
2089 * @param pVCpu Pointer to the VMCPU.
2090 * @param pCpu Pointer to the global HM CPU struct.
2091 *
2092 * @remarks Called with interrupts disabled.
2093 */
2094static void hmR0VmxFlushTaggedTlbVpid(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2095{
2096 AssertPtr(pVM);
2097 AssertPtr(pVCpu);
2098 AssertPtr(pCpu);
2099 Assert(pCpu->idCpu != NIL_RTCPUID);
2100    AssertMsg(pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbVpid cannot be invoked with VPID disabled."));
2101    AssertMsg(!pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbVpid cannot be invoked with NestedPaging enabled."));
2102
2103 /*
2104 * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
2105 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
2106 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
2107 */
2108 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
2109 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
2110 {
2111 pVCpu->hm.s.fForceTLBFlush = true;
2112 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2113 }
2114
2115 /* Check for explicit TLB shootdown flushes. */
2116 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2117 {
2118 /*
2119 * If we ever support VPID flush combinations other than ALL or SINGLE-context (see hmR0VmxSetupTaggedTlb())
2120 * we would need to explicitly flush in this case (add an fExplicitFlush = true here and change the
2121 * pCpu->fFlushAsidBeforeUse check below to include fExplicitFlush's too) - an obscure corner case.
2122 */
2123 pVCpu->hm.s.fForceTLBFlush = true;
2124 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2125 }
2126
2127 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
2128 if (pVCpu->hm.s.fForceTLBFlush)
2129 {
2130 ++pCpu->uCurrentAsid;
2131 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
2132 {
2133 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0 */
2134 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
2135 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
2136 }
2137
2138 pVCpu->hm.s.fForceTLBFlush = false;
2139 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
2140 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
2141 if (pCpu->fFlushAsidBeforeUse)
2142 {
2143 if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_SINGLE_CONTEXT)
2144 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_SINGLE_CONTEXT, 0 /* GCPtr */);
2145 else if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_ALL_CONTEXTS)
2146 {
2147 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
2148 pCpu->fFlushAsidBeforeUse = false;
2149 }
2150 else
2151 {
2152 /* hmR0VmxSetupTaggedTlb() ensures we never get here. Paranoia. */
2153 AssertMsgFailed(("Unsupported VPID-flush context type.\n"));
2154 }
2155 }
2156 }
2157 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere. See hmQueueInvlPage()
2158 * where it is commented out. Support individual entry flushing
2159 * someday. */
2160#if 0
2161 else
2162 {
2163 AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
2164 ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
2165 pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
2166 pCpu->uCurrentAsid, pCpu->cTlbFlushes));
2167
2168 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
2169 {
2170 /* Flush individual guest entries using VPID or as little as possible with EPT as supported by the CPU. */
2171 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
2172 {
2173 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
2174 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
2175 }
2176 else
2177 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
2178
2179 pVCpu->hm.s.TlbShootdown.cPages = 0;
2180 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
2181 }
2182 else
2183 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
2184 }
2185#endif
2186
2187 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
2188 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
2189 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
2190 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pCpu->idCpu,
2191 pCpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
2192 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2193 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2194
2195 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
2196 AssertRC(rc);
2197}
2198
2199
2200/**
2201 * Flushes the guest TLB entry based on CPU capabilities.
2202 *
2203 * @param pVCpu Pointer to the VMCPU.
2204 * @param pCpu Pointer to the global HM CPU struct.
2205 */
2206DECLINLINE(void) hmR0VmxFlushTaggedTlb(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2207{
2208#ifdef HMVMX_ALWAYS_FLUSH_TLB
2209 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
2210#endif
2211 PVM pVM = pVCpu->CTX_SUFF(pVM);
2212 switch (pVM->hm.s.vmx.uFlushTaggedTlb)
2213 {
2214 case HMVMX_FLUSH_TAGGED_TLB_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVM, pVCpu, pCpu); break;
2215 case HMVMX_FLUSH_TAGGED_TLB_EPT: hmR0VmxFlushTaggedTlbEpt(pVM, pVCpu, pCpu); break;
2216 case HMVMX_FLUSH_TAGGED_TLB_VPID: hmR0VmxFlushTaggedTlbVpid(pVM, pVCpu, pCpu); break;
2217 case HMVMX_FLUSH_TAGGED_TLB_NONE: hmR0VmxFlushTaggedTlbNone(pVM, pVCpu, pCpu); break;
2218 default:
2219 AssertMsgFailed(("Invalid flush-tag function identifier\n"));
2220 break;
2221 }
2222
2223 /* VMCPU_FF_TLB_SHOOTDOWN is unused. */
2224 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN));
2225
2226 /* Don't assert that VMCPU_FF_TLB_FLUSH should no longer be pending. It can be set by other EMTs. */
2227}
2228
2229
2230/**
2231 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
2232 * TLB entries from the host TLB before VM-entry.
2233 *
2234 * @returns VBox status code.
2235 * @param pVM Pointer to the VM.
2236 */
2237static int hmR0VmxSetupTaggedTlb(PVM pVM)
2238{
2239 /*
2240 * Determine optimal flush type for Nested Paging.
2241     * We cannot ignore EPT if no suitable flush type is supported by the CPU, as we've already set up unrestricted
2242     * guest execution (see hmR3InitFinalizeR0()).
2243 */
2244 if (pVM->hm.s.fNestedPaging)
2245 {
2246 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
2247 {
2248 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
2249 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_SINGLE_CONTEXT;
2250 else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
2251 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_ALL_CONTEXTS;
2252 else
2253 {
2254 /* Shouldn't happen. EPT is supported but no suitable flush-types supported. */
2255 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
2256 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2257 }
2258
2259 /* Make sure the write-back cacheable memory type for EPT is supported. */
2260 if (!(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB))
2261 {
2262                LogRel(("hmR0VmxSetupTaggedTlb: Unsupported EPTP memory type; u64EptVpidCaps=%#RX64.\n", pVM->hm.s.vmx.Msrs.u64EptVpidCaps));
2263 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
2264 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2265 }
2266 }
2267 else
2268 {
2269 /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
2270 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
2271 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2272 }
2273 }
2274
2275 /*
2276 * Determine optimal flush type for VPID.
2277 */
2278 if (pVM->hm.s.vmx.fVpid)
2279 {
2280 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
2281 {
2282 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
2283 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_SINGLE_CONTEXT;
2284 else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
2285 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_ALL_CONTEXTS;
2286 else
2287 {
2288                /* Neither SINGLE nor ALL-context flush types for VPID are supported by the CPU. Ignore VPID capability. */
2289 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
2290 LogRel(("hmR0VmxSetupTaggedTlb: Only INDIV_ADDR supported. Ignoring VPID.\n"));
2291 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
2292 LogRel(("hmR0VmxSetupTaggedTlb: Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
2293 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NOT_SUPPORTED;
2294 pVM->hm.s.vmx.fVpid = false;
2295 }
2296 }
2297 else
2298 {
2299 /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
2300            Log4(("hmR0VmxSetupTaggedTlb: VPID supported without INVVPID support. Ignoring VPID.\n"));
2301 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NOT_SUPPORTED;
2302 pVM->hm.s.vmx.fVpid = false;
2303 }
2304 }
2305
2306 /*
2307 * Setup the handler for flushing tagged-TLBs.
2308 */
2309 if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
2310 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT_VPID;
2311 else if (pVM->hm.s.fNestedPaging)
2312 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT;
2313 else if (pVM->hm.s.vmx.fVpid)
2314 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_VPID;
2315 else
2316 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_NONE;
2317 return VINF_SUCCESS;
2318}
2319
2320
2321/**
2322 * Sets up pin-based VM-execution controls in the VMCS.
2323 *
2324 * @returns VBox status code.
2325 * @param pVM Pointer to the VM.
2326 * @param pVCpu Pointer to the VMCPU.
2327 */
2328static int hmR0VmxSetupPinCtls(PVM pVM, PVMCPU pVCpu)
2329{
2330 AssertPtr(pVM);
2331 AssertPtr(pVCpu);
2332
2333 uint32_t val = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0; /* Bits set here must always be set. */
2334 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1; /* Bits cleared here must always be cleared. */
2335
2336 val |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT /* External interrupts cause a VM-exit. */
2337 | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT; /* Non-maskable interrupts (NMIs) cause a VM-exit. */
2338
2339 if (pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
2340 val |= VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI; /* Use virtual NMIs and virtual-NMI blocking features. */
2341
2342 /* Enable the VMX preemption timer. */
2343 if (pVM->hm.s.vmx.fUsePreemptTimer)
2344 {
2345 Assert(pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
2346 val |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER;
2347 }
2348
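    /* 'zap' holds the allowed-1 bits; if any bit requested in 'val' is not allowed by the CPU, the AND below
       clears it and the comparison fails. */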
2349 if ((val & zap) != val)
2350 {
2351 LogRel(("hmR0VmxSetupPinCtls: invalid pin-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
2352 pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0, val, zap));
2353 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
2354 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2355 }
2356
2357 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, val);
2358 AssertRCReturn(rc, rc);
2359
2360 pVCpu->hm.s.vmx.u32PinCtls = val;
2361 return rc;
2362}
2363
2364
2365/**
2366 * Sets up processor-based VM-execution controls in the VMCS.
2367 *
2368 * @returns VBox status code.
2369 * @param pVM Pointer to the VM.
2370 * @param   pVCpu       Pointer to the VMCPU.
2371 */
2372static int hmR0VmxSetupProcCtls(PVM pVM, PVMCPU pVCpu)
2373{
2374 AssertPtr(pVM);
2375 AssertPtr(pVCpu);
2376
2377 int rc = VERR_INTERNAL_ERROR_5;
2378 uint32_t val = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0; /* Bits set here must be set in the VMCS. */
2379 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2380
2381 val |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT /* HLT causes a VM-exit. */
2382 | VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
2383 | VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
2384 | VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
2385 | VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT /* RDPMC causes a VM-exit. */
2386 | VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT /* MONITOR causes a VM-exit. */
2387 | VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
2388
2389    /* We toggle VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT later; check that it is not -always- required to be set or cleared. */
2390 if ( !(pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT)
2391 || (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))
2392 {
2393 LogRel(("hmR0VmxSetupProcCtls: unsupported VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT combo!"));
2394 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
2395 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2396 }
2397
2398 /* Without Nested Paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
2399 if (!pVM->hm.s.fNestedPaging)
2400 {
2401 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); /* Paranoia. */
2402 val |= VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT
2403 | VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
2404 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
2405 }
2406
2407 /* Use TPR shadowing if supported by the CPU. */
2408 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
2409 {
2410 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
2411 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */
2412 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
2413 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
2414 AssertRCReturn(rc, rc);
2415
2416 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
2417 /* CR8 writes cause a VM-exit based on TPR threshold. */
2418 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT));
2419 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT));
2420 }
2421 else
2422 {
2423 /*
2424 * Some 32-bit CPUs do not support CR8 load/store exiting as MOV CR8 is invalid on 32-bit Intel CPUs.
2425 * Set this control only for 64-bit guests.
2426 */
2427 if (pVM->hm.s.fAllow64BitGuests)
2428 {
2429 val |= VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT /* CR8 reads cause a VM-exit. */
2430 | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */
2431 }
2432 }
2433
2434 /* Use MSR-bitmaps if supported by the CPU. */
2435 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
2436 {
2437 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS;
2438
2439 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
2440 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */
2441 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
2442 AssertRCReturn(rc, rc);
2443
2444 /*
2445 * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored
2446 * automatically using dedicated fields in the VMCS.
2447 */
2448 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2449 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2450 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2451 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2452 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2453
2454#if HC_ARCH_BITS == 64
2455 /*
2456 * Set passthru permissions for the following MSRs (mandatory for VT-x) required for 64-bit guests.
2457 */
2458 if (pVM->hm.s.fAllow64BitGuests)
2459 {
2460 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2461 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2462 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2463 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2464 }
2465#endif
2466 }
2467
2468 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
2469 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
2470 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
2471
2472 if ((val & zap) != val)
2473 {
2474 LogRel(("hmR0VmxSetupProcCtls: invalid processor-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
2475 pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0, val, zap));
2476 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
2477 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2478 }
2479
2480 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, val);
2481 AssertRCReturn(rc, rc);
2482
2483 pVCpu->hm.s.vmx.u32ProcCtls = val;
2484
2485 /*
2486 * Secondary processor-based VM-execution controls.
2487 */
2488 if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL))
2489 {
2490 val = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0; /* Bits set here must be set in the VMCS. */
2491 zap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2492
2493 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
2494 val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT; /* WBINVD causes a VM-exit. */
2495
2496 if (pVM->hm.s.fNestedPaging)
2497 val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT; /* Enable EPT. */
2498 else
2499 {
2500 /*
2501 * Without Nested Paging, INVPCID should cause a VM-exit. Enabling this bit causes the CPU to refer to
2502 * VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT when INVPCID is executed by the guest.
2503 * See Intel spec. 25.4 "Changes to instruction behaviour in VMX non-root operation".
2504 */
2505 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
2506 val |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
2507 }
2508
2509 if (pVM->hm.s.vmx.fVpid)
2510 val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID; /* Enable VPID. */
2511
2512 if (pVM->hm.s.vmx.fUnrestrictedGuest)
2513 val |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST; /* Enable Unrestricted Execution. */
2514
2515 /* Enable Virtual-APIC page accesses if supported by the CPU. This is essentially where the TPR shadow resides. */
2516 /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
2517 * done dynamically. */
2518 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
2519 {
2520 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
2521 Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */
2522 val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC; /* Virtualize APIC accesses. */
2523 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
2524 AssertRCReturn(rc, rc);
2525 }
2526
2527 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
2528 val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP; /* Enable RDTSCP support. */
2529
2530 if ((val & zap) != val)
2531 {
2532 LogRel(("hmR0VmxSetupProcCtls: invalid secondary processor-based VM-execution controls combo! "
2533 "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0, val, zap));
2534 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
2535 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2536 }
2537
2538 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, val);
2539 AssertRCReturn(rc, rc);
2540
2541 pVCpu->hm.s.vmx.u32ProcCtls2 = val;
2542 }
2543 else if (RT_UNLIKELY(pVM->hm.s.vmx.fUnrestrictedGuest))
2544 {
2545        LogRel(("hmR0VmxSetupProcCtls: Unrestricted Guest enabled while secondary processor-based VM-execution controls are not "
2546                "available\n"));
2547 pVCpu->hm.s.u32HMError = VMX_UFC_INVALID_UX_COMBO;
2548 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2549 }
2550
2551 return VINF_SUCCESS;
2552}
2553
2554
2555/**
2556 * Sets up miscellaneous (everything other than Pin & Processor-based
2557 * VM-execution) control fields in the VMCS.
2558 *
2559 * @returns VBox status code.
2560 * @param pVM Pointer to the VM.
2561 * @param pVCpu Pointer to the VMCPU.
2562 */
2563static int hmR0VmxSetupMiscCtls(PVM pVM, PVMCPU pVCpu)
2564{
2565 NOREF(pVM);
2566 AssertPtr(pVM);
2567 AssertPtr(pVCpu);
2568
2569 int rc = VERR_GENERAL_FAILURE;
2570
2571 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
2572#if 0
2573 /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxLoadGuestCR3AndCR4())*/
2574 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0); AssertRCReturn(rc, rc);
2575 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0); AssertRCReturn(rc, rc);
2576
2577 /*
2578 * Set MASK & MATCH to 0. VMX checks if GuestPFErrCode & MASK == MATCH. If equal (in our case it always is)
2579 * and if the X86_XCPT_PF bit in the exception bitmap is set it causes a VM-exit, if clear doesn't cause an exit.
2580 * We thus use the exception bitmap to control it rather than use both.
2581 */
2582 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0); AssertRCReturn(rc, rc);
2583 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0); AssertRCReturn(rc, rc);
2584
2585 /** @todo Explore possibility of using IO-bitmaps. */
2586 /* All IO & IOIO instructions cause VM-exits. */
2587 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0); AssertRCReturn(rc, rc);
2588 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0); AssertRCReturn(rc, rc);
2589
2590 /* Initialize the MSR-bitmap area. */
2591 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0); AssertRCReturn(rc, rc);
2592 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0); AssertRCReturn(rc, rc);
2593 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0); AssertRCReturn(rc, rc);
2594#endif
2595
2596 /* Setup MSR auto-load/store area. */
2597 Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
2598 Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf)); /* Lower 4 bits MBZ. */
2599 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
2600 AssertRCReturn(rc, rc);
2601 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
2602 AssertRCReturn(rc, rc);
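    /* Note: the same guest-MSR area is used for both the VM-entry MSR-load and the VM-exit MSR-store lists,
       so MSR values the CPU stores on VM-exit are automatically picked up again on the next VM-entry. */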
2603
2604 Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
2605 Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf)); /* Lower 4 bits MBZ. */
2606 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
2607 AssertRCReturn(rc, rc);
2608
2609 /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
2610 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, UINT64_C(0xffffffffffffffff));
2611 AssertRCReturn(rc, rc);
2612
2613 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
2614#if 0
2615 /* Setup debug controls */
2616 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0); /** @todo We don't support IA32_DEBUGCTL MSR. Should we? */
2617 AssertRCReturn(rc, rc);
2618 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
2619 AssertRCReturn(rc, rc);
2620#endif
2621
2622 return rc;
2623}
2624
2625
2626/**
2627 * Sets up the initial exception bitmap in the VMCS based on static conditions
2628 * (i.e. conditions that cannot ever change after starting the VM).
2629 *
2630 * @returns VBox status code.
2631 * @param pVM Pointer to the VM.
2632 * @param pVCpu Pointer to the VMCPU.
2633 */
2634static int hmR0VmxInitXcptBitmap(PVM pVM, PVMCPU pVCpu)
2635{
2636 AssertPtr(pVM);
2637 AssertPtr(pVCpu);
2638
2639 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
2640
2641 uint32_t u32XcptBitmap = 0;
2642
2643 /* Without Nested Paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
2644 if (!pVM->hm.s.fNestedPaging)
2645 u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
2646
2647 pVCpu->hm.s.vmx.u32XcptBitmap = u32XcptBitmap;
2648 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
2649 AssertRCReturn(rc, rc);
2650 return rc;
2651}
2652
2653
2654/**
2655 * Sets up the initial guest-state mask. The guest-state mask is consulted
2656 * before reading guest-state fields from the VMCS as VMREADs can be expensive
2657 * for the nested virtualization case (as they would cause VM-exits).
2658 *
2659 * @param pVCpu Pointer to the VMCPU.
2660 */
2661static int hmR0VmxInitUpdatedGuestStateMask(PVMCPU pVCpu)
2662{
2663 /* Initially the guest-state is up-to-date as there is nothing in the VMCS. */
2664 HMVMXCPU_GST_RESET_TO(pVCpu, HMVMX_UPDATED_GUEST_ALL);
2665 return VINF_SUCCESS;
2666}
2667
2668
2669/**
2670 * Does per-VM VT-x initialization.
2671 *
2672 * @returns VBox status code.
2673 * @param pVM Pointer to the VM.
2674 */
2675VMMR0DECL(int) VMXR0InitVM(PVM pVM)
2676{
2677 LogFlowFunc(("pVM=%p\n", pVM));
2678
2679 int rc = hmR0VmxStructsAlloc(pVM);
2680 if (RT_FAILURE(rc))
2681 {
2682 LogRel(("VMXR0InitVM: hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
2683 return rc;
2684 }
2685
2686 return VINF_SUCCESS;
2687}
2688
2689
2690/**
2691 * Does per-VM VT-x termination.
2692 *
2693 * @returns VBox status code.
2694 * @param pVM Pointer to the VM.
2695 */
2696VMMR0DECL(int) VMXR0TermVM(PVM pVM)
2697{
2698 LogFlowFunc(("pVM=%p\n", pVM));
2699
2700#ifdef VBOX_WITH_CRASHDUMP_MAGIC
2701 if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ)
2702 ASMMemZero32(pVM->hm.s.vmx.pvScratch, PAGE_SIZE);
2703#endif
2704 hmR0VmxStructsFree(pVM);
2705 return VINF_SUCCESS;
2706}
2707
2708
2709/**
2710 * Sets up the VM for execution under VT-x.
2711 * This function is only called once per-VM during initialization.
2712 *
2713 * @returns VBox status code.
2714 * @param pVM Pointer to the VM.
2715 */
2716VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
2717{
2718 AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
2719 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2720
2721 LogFlowFunc(("pVM=%p\n", pVM));
2722
2723 /*
2724 * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be allocated.
2725 * We no longer support the highly unlikely case of UnrestrictedGuest without pRealModeTSS. See hmR3InitFinalizeR0Intel().
2726 */
2727 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
2728 && ( !pVM->hm.s.vmx.pNonPagingModeEPTPageTable
2729 || !pVM->hm.s.vmx.pRealModeTSS))
2730 {
2731 LogRel(("VMXR0SetupVM: invalid real-on-v86 state.\n"));
2732 return VERR_INTERNAL_ERROR;
2733 }
2734
2735#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2736 /*
2737 * This is for the darwin 32-bit/PAE kernels trying to execute 64-bit guests. We don't bother with
2738 * the 32<->64 switcher in this case. This is a rare, legacy use-case with barely any test coverage.
2739 */
2740 if ( pVM->hm.s.fAllow64BitGuests
2741 && !HMVMX_IS_64BIT_HOST_MODE())
2742 {
2743 LogRel(("VMXR0SetupVM: Unsupported guest and host paging mode combination.\n"));
2744 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
2745 }
2746#endif
2747
2748    /* Initialize these always, see hmR3InitFinalizeR0(). */
2749 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NONE;
2750 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NONE;
2751
2752 /* Setup the tagged-TLB flush handlers. */
2753 int rc = hmR0VmxSetupTaggedTlb(pVM);
2754 if (RT_FAILURE(rc))
2755 {
2756 LogRel(("VMXR0SetupVM: hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
2757 return rc;
2758 }
2759
2760 /* Check if we can use the VMCS controls for swapping the EFER MSR. */
2761 Assert(!pVM->hm.s.vmx.fSupportsVmcsEfer);
2762#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2763 if ( HMVMX_IS_64BIT_HOST_MODE()
2764 && (pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1 & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
2765 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
2766 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR))
2767 {
2768 pVM->hm.s.vmx.fSupportsVmcsEfer = true;
2769 }
2770#endif
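    /* When the VMCS EFER controls are not usable, EFER is presumably swapped through the auto-load/store
       MSR area instead (hmR0VmxCheckAutoLoadStoreMsrs() in strict builds expects EFER to remain intercepted
       in the MSR bitmap). */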
2771
2772 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2773 {
2774 PVMCPU pVCpu = &pVM->aCpus[i];
2775 AssertPtr(pVCpu);
2776 AssertPtr(pVCpu->hm.s.vmx.pvVmcs);
2777
2778 /* Log the VCPU pointers, useful for debugging SMP VMs. */
2779 Log4(("VMXR0SetupVM: pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
2780
2781 /* Initialize the VM-exit history array with end-of-array markers (UINT16_MAX). */
2782 Assert(!pVCpu->hm.s.idxExitHistoryFree);
2783 HMCPU_EXIT_HISTORY_RESET(pVCpu);
2784
2785 /* Set revision dword at the beginning of the VMCS structure. */
2786 *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.Msrs.u64BasicInfo);
2787
2788 /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */
2789 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2790 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2791 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2792
2793 /* Load this VMCS as the current VMCS. */
2794 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2795 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2796 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2797
2798 rc = hmR0VmxSetupPinCtls(pVM, pVCpu);
2799 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2800 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2801
2802 rc = hmR0VmxSetupProcCtls(pVM, pVCpu);
2803 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2804 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2805
2806 rc = hmR0VmxSetupMiscCtls(pVM, pVCpu);
2807 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2808 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2809
2810 rc = hmR0VmxInitXcptBitmap(pVM, pVCpu);
2811 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2812 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2813
2814 rc = hmR0VmxInitUpdatedGuestStateMask(pVCpu);
2815 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitUpdatedGuestStateMask failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2816 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2817
2818#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2819 rc = hmR0VmxInitVmcsReadCache(pVM, pVCpu);
2820 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2821 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2822#endif
2823
2824 /* Re-sync the CPU's internal data into our VMCS memory region & reset the launch state to "clear". */
2825 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2826 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs(2) failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2827 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2828
2829 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
2830
2831 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc);
2832 }
2833
2834 return VINF_SUCCESS;
2835}
2836
2837
2838/**
2839 * Saves the host control registers (CR0, CR3, CR4) into the host-state area in
2840 * the VMCS.
2841 *
2842 * @returns VBox status code.
2843 * @param pVM Pointer to the VM.
2844 * @param pVCpu Pointer to the VMCPU.
2845 */
2846DECLINLINE(int) hmR0VmxSaveHostControlRegs(PVM pVM, PVMCPU pVCpu)
2847{
2848 NOREF(pVM); NOREF(pVCpu);
2849
2850 RTCCUINTREG uReg = ASMGetCR0();
2851 int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
2852 AssertRCReturn(rc, rc);
2853
2854#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2855 /* For the darwin 32-bit hybrid kernel, we need the 64-bit CR3 as it uses 64-bit paging. */
2856 if (HMVMX_IS_64BIT_HOST_MODE())
2857 {
2858 uint64_t uRegCR3 = HMR0Get64bitCR3();
2859 rc = VMXWriteVmcs64(VMX_VMCS_HOST_CR3, uRegCR3);
2860 }
2861 else
2862#endif
2863 {
2864 uReg = ASMGetCR3();
2865 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
2866 }
2867 AssertRCReturn(rc, rc);
2868
2869 uReg = ASMGetCR4();
2870 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
2871 AssertRCReturn(rc, rc);
2872 return rc;
2873}
2874
2875
2876#if HC_ARCH_BITS == 64
2877/**
2878 * Macro for adjusting host segment selectors to satisfy VT-x's VM-entry
2879 * requirements. See hmR0VmxSaveHostSegmentRegs().
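 *
 * A host selector with a non-zero RPL or with the TI (LDT) bit set would fail the VM-entry
 * host-state checks (Intel spec. 26.2.3), so a null selector is loaded into the VMCS instead
 * and valid selectors are recorded so they can be restored manually before we get preempted.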
2880 */
2881# define VMXLOCAL_ADJUST_HOST_SEG(seg, selValue) \
2882 if ((selValue) & (X86_SEL_RPL | X86_SEL_LDT)) \
2883 { \
2884 bool fValidSelector = true; \
2885 if ((selValue) & X86_SEL_LDT) \
2886 { \
2887 uint32_t uAttr = ASMGetSegAttr((selValue)); \
2888 fValidSelector = RT_BOOL(uAttr != UINT32_MAX && (uAttr & X86_DESC_P)); \
2889 } \
2890 if (fValidSelector) \
2891 { \
2892 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_##seg; \
2893 pVCpu->hm.s.vmx.RestoreHost.uHostSel##seg = (selValue); \
2894 } \
2895 (selValue) = 0; \
2896 }
2897#endif
2898
2899
2900/**
2901 * Saves the host segment registers, GDTR, IDTR, TR, and the GS and FS bases into
2902 * the host-state area in the VMCS.
2903 *
2904 * @returns VBox status code.
2905 * @param pVM Pointer to the VM.
2906 * @param pVCpu Pointer to the VMCPU.
2907 */
2908DECLINLINE(int) hmR0VmxSaveHostSegmentRegs(PVM pVM, PVMCPU pVCpu)
2909{
2910 int rc = VERR_INTERNAL_ERROR_5;
2911
2912#if HC_ARCH_BITS == 64
2913 /*
2914 * If we've executed guest code using VT-x, the host-state bits will be messed up. We
2915 * should -not- save the messed up state without restoring the original host-state. See @bugref{7240}.
2916 */
2917 AssertMsgReturn(!(pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED),
2918 ("Re-saving host-state after executing guest code without leaving VT-x!\n"), VERR_WRONG_ORDER);
2919#endif
2920
2921 /*
2922 * Host DS, ES, FS and GS segment registers.
2923 */
2924#if HC_ARCH_BITS == 64
2925 RTSEL uSelDS = ASMGetDS();
2926 RTSEL uSelES = ASMGetES();
2927 RTSEL uSelFS = ASMGetFS();
2928 RTSEL uSelGS = ASMGetGS();
2929#else
2930 RTSEL uSelDS = 0;
2931 RTSEL uSelES = 0;
2932 RTSEL uSelFS = 0;
2933 RTSEL uSelGS = 0;
2934#endif
2935
2936 /* Recalculate which host-state bits need to be manually restored. */
2937 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
2938
2939 /*
2940 * Host CS and SS segment registers.
2941 */
2942#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2943 RTSEL uSelCS;
2944 RTSEL uSelSS;
2945 if (HMVMX_IS_64BIT_HOST_MODE())
2946 {
2947 uSelCS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelCS;
2948 uSelSS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelSS;
2949 }
2950 else
2951 {
2952 /* Seems darwin uses the LDT (TI flag is set) in the CS & SS selectors which VT-x doesn't like. */
2953 uSelCS = (RTSEL)(uintptr_t)&SUPR0AbsKernelCS;
2954 uSelSS = (RTSEL)(uintptr_t)&SUPR0AbsKernelSS;
2955 }
2956#else
2957 RTSEL uSelCS = ASMGetCS();
2958 RTSEL uSelSS = ASMGetSS();
2959#endif
2960
2961 /*
2962 * Host TR segment register.
2963 */
2964 RTSEL uSelTR = ASMGetTR();
2965
2966#if HC_ARCH_BITS == 64
2967 /*
2968     * Determine if the host segment registers are suitable for VT-x; otherwise load null selectors so that VM-entry succeeds,
2969     * and restore the originals before we get preempted. See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
2970 */
2971 VMXLOCAL_ADJUST_HOST_SEG(DS, uSelDS);
2972 VMXLOCAL_ADJUST_HOST_SEG(ES, uSelES);
2973 VMXLOCAL_ADJUST_HOST_SEG(FS, uSelFS);
2974 VMXLOCAL_ADJUST_HOST_SEG(GS, uSelGS);
2975# undef VMXLOCAL_ADJUST_HOST_SEG
2976#endif
2977
2978 /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
2979 Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
2980 Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));
2981 Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
2982 Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
2983 Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT));
2984 Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT));
2985 Assert(!(uSelTR & X86_SEL_RPL)); Assert(!(uSelTR & X86_SEL_LDT));
2986 Assert(uSelCS);
2987 Assert(uSelTR);
2988
2989 /* Assertion is right but we would not have updated u32ExitCtls yet. */
2990#if 0
2991 if (!(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE))
2992 Assert(uSelSS != 0);
2993#endif
2994
2995 /* Write these host selector fields into the host-state area in the VMCS. */
2996 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS); AssertRCReturn(rc, rc);
2997 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS); AssertRCReturn(rc, rc);
2998#if HC_ARCH_BITS == 64
2999 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS); AssertRCReturn(rc, rc);
3000 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES); AssertRCReturn(rc, rc);
3001 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_FS, uSelFS); AssertRCReturn(rc, rc);
3002 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_GS, uSelGS); AssertRCReturn(rc, rc);
3003#endif
3004 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_TR, uSelTR); AssertRCReturn(rc, rc);
3005
3006 /*
3007 * Host GDTR and IDTR.
3008 */
3009 RTGDTR Gdtr;
3010 RT_ZERO(Gdtr);
3011#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
3012 if (HMVMX_IS_64BIT_HOST_MODE())
3013 {
3014 X86XDTR64 Gdtr64;
3015 X86XDTR64 Idtr64;
3016 HMR0Get64bitGdtrAndIdtr(&Gdtr64, &Idtr64);
3017 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GDTR_BASE, Gdtr64.uAddr); AssertRCReturn(rc, rc);
3018 rc = VMXWriteVmcs64(VMX_VMCS_HOST_IDTR_BASE, Idtr64.uAddr); AssertRCReturn(rc, rc);
3019
3020 Gdtr.cbGdt = Gdtr64.cb;
3021 Gdtr.pGdt = (uintptr_t)Gdtr64.uAddr;
3022 }
3023 else
3024#endif
3025 {
3026 RTIDTR Idtr;
3027 ASMGetGDTR(&Gdtr);
3028 ASMGetIDTR(&Idtr);
3029 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt); AssertRCReturn(rc, rc);
3030 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt); AssertRCReturn(rc, rc);
3031
3032#if HC_ARCH_BITS == 64
3033 /*
3034     * Determine if we need to manually restore the GDTR and IDTR limits, as VT-x zaps them to the
3035 * maximum limit (0xffff) on every VM-exit.
3036 */
3037 if (Gdtr.cbGdt != 0xffff)
3038 {
3039 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;
3040 AssertCompile(sizeof(Gdtr) == sizeof(X86XDTR64));
3041 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
3042 }
3043
3044 /*
3045 * IDT limit is effectively capped at 0xfff. (See Intel spec. 6.14.1 "64-Bit Mode IDT"
3046 * and Intel spec. 6.2 "Exception and Interrupt Vectors".) Therefore if the host has the limit as 0xfff, VT-x
3047     * bloating the limit to 0xffff shouldn't cause any different CPU behavior. However, several hosts either insist
3048     * on 0xfff being the limit (Windows Patch Guard) or use the limit for other purposes (darwin puts the CPU ID in there
3049 * but botches sidt alignment in at least one consumer). So, we're only allowing IDTR.LIMIT to be left at 0xffff on
3050 * hosts where we are pretty sure it won't cause trouble.
3051 */
3052# if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
3053 if (Idtr.cbIdt < 0x0fff)
3054# else
3055 if (Idtr.cbIdt != 0xffff)
3056# endif
3057 {
3058 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
3059 AssertCompile(sizeof(Idtr) == sizeof(X86XDTR64));
3060 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostIdtr, &Idtr, sizeof(X86XDTR64));
3061 }
3062#endif
3063 }
3064
3065 /*
3066 * Host TR base. Verify that TR selector doesn't point past the GDT. Masking off the TI and RPL bits
3067 * is effectively what the CPU does for "scaling by 8". TI is always 0 and RPL should be too in most cases.
3068 */
3069 if ((uSelTR | X86_SEL_RPL_LDT) > Gdtr.cbGdt)
3070 {
3071 AssertMsgFailed(("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt));
3072 return VERR_VMX_INVALID_HOST_STATE;
3073 }
3074
3075 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
3076#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
3077 if (HMVMX_IS_64BIT_HOST_MODE())
3078 {
3079 /* We need the 64-bit TR base for hybrid darwin. */
3080 uint64_t u64TRBase = X86DESC64_BASE((PX86DESC64)pDesc);
3081 rc = VMXWriteVmcs64(VMX_VMCS_HOST_TR_BASE, u64TRBase);
3082 }
3083 else
3084#endif
3085 {
3086 uintptr_t uTRBase;
3087#if HC_ARCH_BITS == 64
3088 uTRBase = X86DESC64_BASE(pDesc);
3089
3090 /*
3091 * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on all VM-exits.
3092 * The type is the same for 64-bit busy TSS[1]. The limit needs manual restoration if the host has something else.
3093 * Task switching is not supported in 64-bit mode[2], but the limit still matters as IOPM is supported in 64-bit mode.
3094 * Restoring the limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0.
3095 *
3096 * [1] See Intel spec. 3.5 "System Descriptor Types".
3097 * [2] See Intel spec. 7.2.3 "TSS Descriptor in 64-bit mode".
3098 */
3099 Assert(pDesc->System.u4Type == 11);
3100 if ( pDesc->System.u16LimitLow != 0x67
3101 || pDesc->System.u4LimitHigh)
3102 {
3103 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_TR;
3104 /* If the host has made GDT read-only, we would need to temporarily toggle CR0.WP before writing the GDT. */
3105 if (pVM->hm.s.uHostKernelFeatures & SUPKERNELFEATURES_GDT_READ_ONLY)
3106 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDT_READ_ONLY;
3107 pVCpu->hm.s.vmx.RestoreHost.uHostSelTR = uSelTR;
3108
3109 /* Store the GDTR here as we need it while restoring TR. */
3110 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
3111 }
3112#else
3113 uTRBase = X86DESC_BASE(pDesc);
3114#endif
3115 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
3116 }
3117 AssertRCReturn(rc, rc);
3118
3119 /*
3120 * Host FS base and GS base.
3121 */
3122#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3123 if (HMVMX_IS_64BIT_HOST_MODE())
3124 {
3125 uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
3126 uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
3127 rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase); AssertRCReturn(rc, rc);
3128 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase); AssertRCReturn(rc, rc);
3129
3130# if HC_ARCH_BITS == 64
3131 /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */
3132 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS)
3133 pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = u64FSBase;
3134 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_GS)
3135 pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase;
3136# endif
3137 }
3138#endif
3139 return rc;
3140}
3141
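/*
 * A small worked example of the TR-to-GDT lookup done in hmR0VmxSaveHostSegmentRegs() above, using a
 * hypothetical host TR value (illustrative only, not taken from a real host):
 *
 *   uSelTR                   = 0x0040      // TI=0, RPL=0
 *   uSelTR & X86_SEL_MASK    = 0x0040      // byte offset of the descriptor within the GDT
 *   uSelTR | X86_SEL_RPL_LDT = 0x0047      // last byte of the 8-byte descriptor; must be <= Gdtr.cbGdt
 *
 * Masking off TI and RPL leaves the selector index already multiplied by 8, which is why no explicit
 * "scaling by 8" is needed before adding the offset to Gdtr.pGdt.
 */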
3142
3143/**
3144 * Saves certain host MSRs in the VM-Exit MSR-load area and some in the
3145 * host-state area of the VMCS. These MSRs will be automatically restored on
3146 * the host after every successful VM-exit.
3147 *
3148 * @returns VBox status code.
3149 * @param pVM Pointer to the VM.
3150 * @param pVCpu Pointer to the VMCPU.
3151 *
3152 * @remarks No-long-jump zone!!!
3153 */
3154DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu)
3155{
3156 NOREF(pVM);
3157
3158 AssertPtr(pVCpu);
3159 AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
3160
3161 int rc = VINF_SUCCESS;
3162#if HC_ARCH_BITS == 64
3163 if (pVM->hm.s.fAllow64BitGuests)
3164 hmR0VmxLazySaveHostMsrs(pVCpu);
3165#endif
3166
3167 /*
3168 * Host Sysenter MSRs.
3169 */
3170 rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
3171 AssertRCReturn(rc, rc);
3172#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
3173 if (HMVMX_IS_64BIT_HOST_MODE())
3174 {
3175 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
3176 AssertRCReturn(rc, rc);
3177 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
3178 }
3179 else
3180 {
3181 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
3182 AssertRCReturn(rc, rc);
3183 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
3184 }
3185#elif HC_ARCH_BITS == 32
3186 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
3187 AssertRCReturn(rc, rc);
3188 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
3189#else
3190 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
3191 AssertRCReturn(rc, rc);
3192 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
3193#endif
3194 AssertRCReturn(rc, rc);
3195
3196 /*
3197 * Host EFER MSR.
3198 * If the CPU supports the newer VMCS controls for managing EFER, use it.
3199 * Otherwise it's done as part of auto-load/store MSR area in the VMCS, see hmR0VmxLoadGuestMsrs().
3200 */
3201 if (pVM->hm.s.vmx.fSupportsVmcsEfer)
3202 {
3203 rc = VMXWriteVmcs64(VMX_VMCS64_HOST_FIELD_EFER_FULL, pVM->hm.s.vmx.u64HostEfer);
3204 AssertRCReturn(rc, rc);
3205 }
3206
3207 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see
3208 * hmR0VmxLoadGuestExitCtls() !! */
3209
3210 return rc;
3211}
3212
3213
3214/**
3215 * Figures out if we need to swap the EFER MSR, which is a
3216 * particularly expensive operation.
3217 *
3218 * We check all relevant bits. For now, that's everything
3219 * besides LMA/LME, as these two bits are handled by VM-entry,
3220 * see hmR0VmxLoadGuestExitCtls() and
3221 * hmR0VmxLoadGuestEntryCtls().
3222 *
3223 * @returns true if we need to load guest EFER, false otherwise.
3224 * @param pVCpu Pointer to the VMCPU.
3225 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3226 * out-of-sync. Make sure to update the required fields
3227 * before using them.
3228 *
3229 * @remarks Requires EFER, CR4.
3230 * @remarks No-long-jump zone!!!
3231 */
3232static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3233{
3234#ifdef HMVMX_ALWAYS_SWAP_EFER
3235 return true;
3236#endif
3237
3238#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3239 /* For 32-bit hosts running 64-bit guests, we always swap EFER in the world-switcher. Nothing to do here. */
3240 if (CPUMIsGuestInLongMode(pVCpu))
3241 return false;
3242#endif
3243
3244 PVM pVM = pVCpu->CTX_SUFF(pVM);
3245 uint64_t u64HostEfer = pVM->hm.s.vmx.u64HostEfer;
3246 uint64_t u64GuestEfer = pMixedCtx->msrEFER;
3247
3248 /*
3249 * For 64-bit guests, if EFER.SCE bit differs, we need to swap to ensure that the
3250 * guest's SYSCALL behaviour isn't screwed. See @bugref{7386}.
3251 */
3252 if ( CPUMIsGuestInLongMode(pVCpu)
3253 && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE))
3254 {
3255 return true;
3256 }
3257
3258 /*
3259     * If the guest uses PAE and the EFER.NXE bit differs, we need to swap EFER as it
3260 * affects guest paging. 64-bit paging implies CR4.PAE as well.
3261 * See Intel spec. 4.5 "IA-32e Paging" and Intel spec. 4.1.1 "Three Paging Modes".
3262 */
3263 if ( (pMixedCtx->cr4 & X86_CR4_PAE)
3264 && (pMixedCtx->cr0 & X86_CR0_PG)
3265 && (u64GuestEfer & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE))
3266 {
3267 /* Assert that host is PAE capable. */
3268 Assert(pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_NX);
3269 return true;
3270 }
3271
3272 /** @todo Check the latest Intel spec. for any other bits,
3273 * like SMEP/SMAP? */
3274 return false;
3275}
3276
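/*
 * A minimal worked example for the NXE check in hmR0VmxShouldSwapEferMsr() above, with hypothetical
 * values: the host runs with EFER.NXE=1 while a legacy PAE guest has EFER.NXE=0, CR4.PAE=1 and CR0.PG=1.
 *
 *   (u64GuestEfer & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE)   // 0 != NXE -> bits differ
 *
 * Since the guest is using PAE paging, a differing NXE bit would change how its page tables are
 * interpreted, so the function returns true and the guest EFER is loaded on VM-entry (either via the
 * VMCS EFER controls or the auto-load/store MSR area).
 */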
3277
3278/**
3279 * Sets up VM-entry controls in the VMCS. These controls can affect things done
3280 * on VM-exit; e.g. "load debug controls", see Intel spec. 24.8.1 "VM-entry
3281 * controls".
3282 *
3283 * @returns VBox status code.
3284 * @param pVCpu Pointer to the VMCPU.
3285 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3286 * out-of-sync. Make sure to update the required fields
3287 * before using them.
3288 *
3289 * @remarks Requires EFER.
3290 * @remarks No-long-jump zone!!!
3291 */
3292DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3293{
3294 int rc = VINF_SUCCESS;
3295 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS))
3296 {
3297 PVM pVM = pVCpu->CTX_SUFF(pVM);
3298 uint32_t val = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0; /* Bits set here must be set in the VMCS. */
3299 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
3300
3301        /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supported the 1-setting of this bit. */
3302 val |= VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG;
3303
3304 /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
3305 if (CPUMIsGuestInLongModeEx(pMixedCtx))
3306 {
3307 val |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;
3308 Log4(("Load[%RU32]: VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST\n", pVCpu->idCpu));
3309 }
3310 else
3311 Assert(!(val & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST));
3312
3313 /* If the CPU supports the newer VMCS controls for managing guest/host EFER, use it. */
3314 if ( pVM->hm.s.vmx.fSupportsVmcsEfer
3315 && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
3316 {
3317 val |= VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR;
3318 Log4(("Load[%RU32]: VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR\n", pVCpu->idCpu));
3319 }
3320
3321 /*
3322 * The following should -not- be set (since we're not in SMM mode):
3323 * - VMX_VMCS_CTRL_ENTRY_ENTRY_SMM
3324 * - VMX_VMCS_CTRL_ENTRY_DEACTIVATE_DUALMON
3325 */
3326
3327 /** @todo VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR,
3328 * VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR. */
3329
3330 if ((val & zap) != val)
3331 {
3332 LogRel(("hmR0VmxLoadGuestEntryCtls: invalid VM-entry controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
3333 pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0, val, zap));
3334 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;
3335 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3336 }
3337
3338 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, val);
3339 AssertRCReturn(rc, rc);
3340
3341 pVCpu->hm.s.vmx.u32EntryCtls = val;
3342 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS);
3343 }
3344 return rc;
3345}
3346
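/*
 * Sketch of the val/zap pattern used by hmR0VmxLoadGuestEntryCtls() above (the MSR values shown are
 * hypothetical): disallowed0 holds the bits the CPU forces to 1, allowed1 the bits it permits to be 1.
 *
 *   val  = VmxEntry.n.disallowed0;          // e.g. 0x000011ff - must-be-1 bits
 *   zap  = VmxEntry.n.allowed1;             // e.g. 0x0000ffff - may-be-1 bits
 *   val |= VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG;  // plus whatever we want enabled
 *   if ((val & zap) != val)                 // a requested bit falls outside allowed1
 *       return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
 *
 * The same pattern is used for the VM-exit controls below.
 */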
3347
3348/**
3349 * Sets up the VM-exit controls in the VMCS.
3350 *
3351 * @returns VBox status code.
3352 * @param pVM Pointer to the VM.
3353 * @param pVCpu Pointer to the VMCPU.
3354 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3355 * out-of-sync. Make sure to update the required fields
3356 * before using them.
3357 *
3358 * @remarks Requires EFER.
3359 */
3360DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3361{
3362 NOREF(pMixedCtx);
3363
3364 int rc = VINF_SUCCESS;
3365 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_EXIT_CTLS))
3366 {
3367 PVM pVM = pVCpu->CTX_SUFF(pVM);
3368 uint32_t val = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0; /* Bits set here must be set in the VMCS. */
3369 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
3370
3371 /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
3372 val |= VMX_VMCS_CTRL_EXIT_SAVE_DEBUG;
3373
3374 /*
3375 * Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary.
3376 * On VM-exit, VT-x sets both the host EFER.LMA and EFER.LME bit to this value. See assertion in hmR0VmxSaveHostMsrs().
3377 */
3378#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3379 if (HMVMX_IS_64BIT_HOST_MODE())
3380 {
3381 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
3382 Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n", pVCpu->idCpu));
3383 }
3384 else
3385 Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
3386#else
3387 if (CPUMIsGuestInLongModeEx(pMixedCtx))
3388 {
3389 /* The switcher returns to long mode, EFER is managed by the switcher. */
3390 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
3391 Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n", pVCpu->idCpu));
3392 }
3393 else
3394 Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
3395#endif /* HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
3396
3397        /* If the newer VMCS fields for managing EFER exist, use them. */
3398 if ( pVM->hm.s.vmx.fSupportsVmcsEfer
3399 && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
3400 {
3401 val |= VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR
3402 | VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR;
3403 Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR, VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR\n", pVCpu->idCpu));
3404 }
3405
3406 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
3407 Assert(!(val & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT));
3408
3409 /** @todo VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR,
3410 * VMX_VMCS_CTRL_EXIT_SAVE_GUEST_PAT_MSR,
3411 * VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR. */
3412
3413 if (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER)
3414 val |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER;
3415
3416 if ((val & zap) != val)
3417 {
3418            LogRel(("hmR0VmxLoadGuestExitCtls: invalid VM-exit controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
3419 pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0, val, zap));
3420 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
3421 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3422 }
3423
3424 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, val);
3425 AssertRCReturn(rc, rc);
3426
3427 pVCpu->hm.s.vmx.u32ExitCtls = val;
3428 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_EXIT_CTLS);
3429 }
3430 return rc;
3431}
3432
3433
3434/**
3435 * Loads the guest APIC and related state.
3436 *
3437 * @returns VBox status code.
3438 * @param pVM Pointer to the VM.
3439 * @param pVCpu Pointer to the VMCPU.
3440 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3441 * out-of-sync. Make sure to update the required fields
3442 * before using them.
3443 */
3444DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3445{
3446 NOREF(pMixedCtx);
3447
3448 int rc = VINF_SUCCESS;
3449 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE))
3450 {
3451 /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
3452 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
3453 {
3454 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
3455
3456 bool fPendingIntr = false;
3457 uint8_t u8Tpr = 0;
3458 uint8_t u8PendingIntr = 0;
3459 rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
3460 AssertRCReturn(rc, rc);
3461
3462 /*
3463 * If there are external interrupts pending but masked by the TPR value, instruct VT-x to cause a VM-exit when
3464 * the guest lowers its TPR below the highest-priority pending interrupt and we can deliver the interrupt.
3465 * If there are no external interrupts pending, set threshold to 0 to not cause a VM-exit. We will eventually deliver
3466 * the interrupt when we VM-exit for other reasons.
3467 */
3468 pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8Tpr; /* Offset 0x80 is TPR in the APIC MMIO range. */
3469 uint32_t u32TprThreshold = 0;
3470 if (fPendingIntr)
3471 {
3472 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR (which is the Task-Priority Class). */
3473 const uint8_t u8PendingPriority = (u8PendingIntr >> 4) & 0xf;
3474 const uint8_t u8TprPriority = (u8Tpr >> 4) & 0xf;
3475 if (u8PendingPriority <= u8TprPriority)
3476 u32TprThreshold = u8PendingPriority;
3477 else
3478 u32TprThreshold = u8TprPriority; /* Required for Vista 64-bit guest, see @bugref{6398}. */
3479 }
3480 Assert(!(u32TprThreshold & 0xfffffff0)); /* Bits 31:4 MBZ. */
3481
3482 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
3483 AssertRCReturn(rc, rc);
3484 }
3485
3486 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
3487 }
3488 return rc;
3489}
3490
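/*
 * A worked example of the TPR-threshold calculation in hmR0VmxLoadGuestApicState() above, with
 * hypothetical values:
 *
 *   u8PendingIntr = 0x51  =>  u8PendingPriority = 5     // priority class of the pending vector
 *   u8Tpr         = 0x80  =>  u8TprPriority     = 8     // guest currently masks priority classes <= 8
 *
 * Since 5 <= 8 the interrupt is masked, so u32TprThreshold = 5: VT-x raises a TPR-below-threshold
 * VM-exit as soon as the guest lowers bits 7:4 of its TPR below 5, at which point the interrupt can
 * be delivered. With nothing pending the threshold stays 0 and no such VM-exit is triggered.
 */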
3491
3492/**
3493 * Gets the guest's interruptibility-state ("interrupt shadow" as AMD calls it).
3494 *
3495 * @returns Guest's interruptibility-state.
3496 * @param pVCpu Pointer to the VMCPU.
3497 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3498 * out-of-sync. Make sure to update the required fields
3499 * before using them.
3500 *
3501 * @remarks No-long-jump zone!!!
3502 */
3503DECLINLINE(uint32_t) hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3504{
3505 /*
3506 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
3507 */
3508 uint32_t uIntrState = 0;
3509 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3510 {
3511 /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). */
3512 AssertMsg(HMVMXCPU_GST_IS_SET(pVCpu, HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS),
3513 ("%#x\n", HMVMXCPU_GST_VALUE(pVCpu)));
3514 if (pMixedCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
3515 {
3516 if (pMixedCtx->eflags.Bits.u1IF)
3517 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
3518 else
3519 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
3520 }
3521 /* else: Although we can clear the force-flag here, let's keep this side-effects free. */
3522 }
3523
3524 /*
3525 * NMIs to the guest are blocked after an NMI is injected until the guest executes an IRET. We only
3526 * bother with virtual-NMI blocking when we have support for virtual NMIs in the CPU, otherwise
3527 * setting this would block host-NMIs and IRET will not clear the blocking.
3528 *
3529 * See Intel spec. 26.6.1 "Interruptibility state". See @bugref{7445}.
3530 */
3531 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)
3532 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI))
3533 {
3534 uIntrState |= VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI;
3535 }
3536
3537 return uIntrState;
3538}
3539
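/*
 * Example of the interruptibility-state value produced by hmR0VmxGetGuestIntrState() above. Per the
 * Intel layout (see spec. 24.4.2 "Guest Non-Register State"), blocking-by-STI is bit 0, blocking-by-
 * MOV SS is bit 1 and blocking-by-NMI is bit 3. A guest that has just executed STI (RIP still equals
 * the inhibit-interrupts PC and EFLAGS.IF is now 1) yields:
 *
 *   uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
 *
 * whereas a pending MOV SS/POP SS inhibition (IF clear in this path) yields BLOCK_MOVSS; the NMI
 * blocking bit is OR'ed in independently when virtual NMIs are in use.
 */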
3540
3541/**
3542 * Loads the guest's interruptibility-state into the guest-state area in the
3543 * VMCS.
3544 *
3545 * @returns VBox status code.
3546 * @param pVCpu Pointer to the VMCPU.
3547 * @param uIntrState The interruptibility-state to set.
3548 */
3549static int hmR0VmxLoadGuestIntrState(PVMCPU pVCpu, uint32_t uIntrState)
3550{
3551 NOREF(pVCpu);
3552 AssertMsg(!(uIntrState & 0xfffffff0), ("%#x\n", uIntrState)); /* Bits 31:4 MBZ. */
3553 Assert((uIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */
3554 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);
3555 AssertRCReturn(rc, rc);
3556 return rc;
3557}
3558
3559
3560/**
3561 * Loads the guest's RIP into the guest-state area in the VMCS.
3562 *
3563 * @returns VBox status code.
3564 * @param pVCpu Pointer to the VMCPU.
3565 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3566 * out-of-sync. Make sure to update the required fields
3567 * before using them.
3568 *
3569 * @remarks No-long-jump zone!!!
3570 */
3571static int hmR0VmxLoadGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3572{
3573 int rc = VINF_SUCCESS;
3574 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))
3575 {
3576 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
3577 AssertRCReturn(rc, rc);
3578
3579 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP);
3580 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RIP=%#RX64 fContextUseFlags=%#RX32\n", pVCpu->idCpu, pMixedCtx->rip,
3581 HMCPU_CF_VALUE(pVCpu)));
3582 }
3583 return rc;
3584}
3585
3586
3587/**
3588 * Loads the guest's RSP into the guest-state area in the VMCS.
3589 *
3590 * @returns VBox status code.
3591 * @param pVCpu Pointer to the VMCPU.
3592 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3593 * out-of-sync. Make sure to update the required fields
3594 * before using them.
3595 *
3596 * @remarks No-long-jump zone!!!
3597 */
3598static int hmR0VmxLoadGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3599{
3600 int rc = VINF_SUCCESS;
3601 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RSP))
3602 {
3603 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
3604 AssertRCReturn(rc, rc);
3605
3606 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RSP);
3607 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RSP=%#RX64\n", pVCpu->idCpu, pMixedCtx->rsp));
3608 }
3609 return rc;
3610}
3611
3612
3613/**
3614 * Loads the guest's RFLAGS into the guest-state area in the VMCS.
3615 *
3616 * @returns VBox status code.
3617 * @param pVCpu Pointer to the VMCPU.
3618 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3619 * out-of-sync. Make sure to update the required fields
3620 * before using them.
3621 *
3622 * @remarks No-long-jump zone!!!
3623 */
3624static int hmR0VmxLoadGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3625{
3626 int rc = VINF_SUCCESS;
3627 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
3628 {
3629 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
3630 Let us assert it as such and use 32-bit VMWRITE. */
3631 Assert(!(pMixedCtx->rflags.u64 >> 32));
3632 X86EFLAGS Eflags = pMixedCtx->eflags;
3633 Eflags.u32 &= VMX_EFLAGS_RESERVED_0; /* Bits 22-31, 15, 5 & 3 MBZ. */
3634 Eflags.u32 |= VMX_EFLAGS_RESERVED_1; /* Bit 1 MB1. */
3635
3636 /*
3637 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so we can restore them on VM-exit.
3638 * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under Virtual 8086 mode.
3639 */
3640 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3641 {
3642 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
3643 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
3644 pVCpu->hm.s.vmx.RealMode.Eflags.u32 = Eflags.u32; /* Save the original eflags of the real-mode guest. */
3645 Eflags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
3646 Eflags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
3647 }
3648
3649 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, Eflags.u32);
3650 AssertRCReturn(rc, rc);
3651
3652 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RFLAGS);
3653 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", pVCpu->idCpu, Eflags.u32));
3654 }
3655 return rc;
3656}
3657
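/*
 * Worked example of the real-on-v86 EFLAGS handling in hmR0VmxLoadGuestRflags() above, with a
 * hypothetical real-mode guest value:
 *
 *   pMixedCtx->eflags.u32 = 0x00000202                  // IF=1, fixed bit 1 set
 *   RealMode.Eflags.u32   = 0x00000202                  // saved so it can be given back on VM-exit
 *   Eflags.Bits.u1VM = 1; Eflags.Bits.u2IOPL = 0;
 *   VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, 0x00020202);  // VM (bit 17) set, IOPL forced to 0
 */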
3658
3659/**
3660 * Loads the guest RIP, RSP and RFLAGS into the guest-state area in the VMCS.
3661 *
3662 * @returns VBox status code.
3663 * @param pVCpu Pointer to the VMCPU.
3664 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3665 * out-of-sync. Make sure to update the required fields
3666 * before using them.
3667 *
3668 * @remarks No-long-jump zone!!!
3669 */
3670DECLINLINE(int) hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3671{
3672 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
3673 AssertRCReturn(rc, rc);
3674 rc = hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
3675 AssertRCReturn(rc, rc);
3676 rc = hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
3677 AssertRCReturn(rc, rc);
3678 return rc;
3679}
3680
3681
3682/**
3683 * Loads the guest CR0 control register into the guest-state area in the VMCS.
3684 * CR0 is partially shared with the host and we have to consider the FPU bits.
3685 *
3686 * @returns VBox status code.
3687 * @param pVM Pointer to the VM.
3688 * @param pVCpu Pointer to the VMCPU.
3689 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3690 * out-of-sync. Make sure to update the required fields
3691 * before using them.
3692 *
3693 * @remarks No-long-jump zone!!!
3694 */
3695static int hmR0VmxLoadSharedCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3696{
3697 /*
3698 * Guest CR0.
3699 * Guest FPU.
3700 */
3701 int rc = VINF_SUCCESS;
3702 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
3703 {
3704 Assert(!(pMixedCtx->cr0 >> 32));
3705 uint32_t u32GuestCR0 = pMixedCtx->cr0;
3706 PVM pVM = pVCpu->CTX_SUFF(pVM);
3707
3708 /* The guest's view (read access) of its CR0 is unblemished. */
3709 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32GuestCR0);
3710 AssertRCReturn(rc, rc);
3711 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX32\n", pVCpu->idCpu, u32GuestCR0));
3712
3713 /* Setup VT-x's view of the guest CR0. */
3714 /* Minimize VM-exits due to CR3 changes when we have NestedPaging. */
3715 if (pVM->hm.s.fNestedPaging)
3716 {
3717 if (CPUMIsGuestPagingEnabledEx(pMixedCtx))
3718 {
3719 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
3720 pVCpu->hm.s.vmx.u32ProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
3721 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT);
3722 }
3723 else
3724 {
3725 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
3726 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
3727 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
3728 }
3729
3730 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
3731 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3732 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
3733
3734 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
3735 AssertRCReturn(rc, rc);
3736 }
3737 else
3738 u32GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
3739
3740 /*
3741 * Guest FPU bits.
3742 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be set on the first
3743         * CPUs to support VT-x, with no mention of it being relaxed with regards to UX in the VM-entry checks.
3744 */
3745 u32GuestCR0 |= X86_CR0_NE;
3746 bool fInterceptNM = false;
3747 if (CPUMIsGuestFPUStateActive(pVCpu))
3748 {
3749 fInterceptNM = false; /* Guest FPU active, no need to VM-exit on #NM. */
3750 /* The guest should still get #NM exceptions when it expects it to, so we should not clear TS & MP bits here.
3751 We're only concerned about -us- not intercepting #NMs when the guest-FPU is active. Not the guest itself! */
3752 }
3753 else
3754 {
3755 fInterceptNM = true; /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
3756 u32GuestCR0 |= X86_CR0_TS /* Guest can task switch quickly and do lazy FPU syncing. */
3757 | X86_CR0_MP; /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
3758 }
3759
3760 /* Catch floating point exceptions if we need to report them to the guest in a different way. */
3761 bool fInterceptMF = false;
3762 if (!(pMixedCtx->cr0 & X86_CR0_NE))
3763 fInterceptMF = true;
3764
3765 /* Finally, intercept all exceptions as we cannot directly inject them in real-mode, see hmR0VmxInjectEventVmcs(). */
3766 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3767 {
3768 Assert(PDMVmmDevHeapIsEnabled(pVM));
3769 Assert(pVM->hm.s.vmx.pRealModeTSS);
3770 pVCpu->hm.s.vmx.u32XcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
3771 fInterceptNM = true;
3772 fInterceptMF = true;
3773 }
3774 else
3775 pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
3776
3777 if (fInterceptNM)
3778 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_NM);
3779 else
3780 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_NM);
3781
3782 if (fInterceptMF)
3783 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_MF);
3784 else
3785 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_MF);
3786
3787 /* Additional intercepts for debugging, define these yourself explicitly. */
3788#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3789 pVCpu->hm.s.vmx.u32XcptBitmap |= 0
3790 | RT_BIT(X86_XCPT_BP)
3791 | RT_BIT(X86_XCPT_DB)
3792 | RT_BIT(X86_XCPT_DE)
3793 | RT_BIT(X86_XCPT_NM)
3794 | RT_BIT(X86_XCPT_TS)
3795 | RT_BIT(X86_XCPT_UD)
3796 | RT_BIT(X86_XCPT_NP)
3797 | RT_BIT(X86_XCPT_SS)
3798 | RT_BIT(X86_XCPT_GP)
3799 | RT_BIT(X86_XCPT_PF)
3800 | RT_BIT(X86_XCPT_MF)
3801 ;
3802#elif defined(HMVMX_ALWAYS_TRAP_PF)
3803 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
3804#endif
3805
3806 Assert(pVM->hm.s.fNestedPaging || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_PF)));
3807
3808 /* Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). */
3809 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
3810 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
3811 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
3812 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
3813 else
3814 Assert((uSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
3815
3816 u32GuestCR0 |= uSetCR0;
3817 u32GuestCR0 &= uZapCR0;
3818 u32GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */
3819
3820 /* Write VT-x's view of the guest CR0 into the VMCS and update the exception bitmap. */
3821 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCR0);
3822 AssertRCReturn(rc, rc);
3823 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
3824 AssertRCReturn(rc, rc);
3825 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", pVCpu->idCpu, u32GuestCR0, uSetCR0,
3826 uZapCR0));
3827
3828 /*
3829 * CR0 is shared between host and guest along with a CR0 read shadow. Therefore, certain bits must not be changed
3830 * by the guest because VT-x ignores saving/restoring them (namely CD, ET, NW) and for certain other bits
3831 * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables).
3832 */
3833 uint32_t u32CR0Mask = 0;
3834 u32CR0Mask = X86_CR0_PE
3835 | X86_CR0_NE
3836 | X86_CR0_WP
3837 | X86_CR0_PG
3838 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
3839 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
3840 | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
3841
3842 /** @todo Avoid intercepting CR0.PE with unrestricted guests. Fix PGM
3843 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
3844 * and @bugref{6944}. */
3845#if 0
3846 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3847 u32CR0Mask &= ~X86_CR0_PE;
3848#endif
3849 if (pVM->hm.s.fNestedPaging)
3850 u32CR0Mask &= ~X86_CR0_WP;
3851
3852 /* If the guest FPU state is active, don't need to VM-exit on writes to FPU related bits in CR0. */
3853 if (fInterceptNM)
3854 {
3855 u32CR0Mask |= X86_CR0_TS
3856 | X86_CR0_MP;
3857 }
3858
3859 /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */
3860 pVCpu->hm.s.vmx.u32CR0Mask = u32CR0Mask;
3861 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32CR0Mask);
3862 AssertRCReturn(rc, rc);
3863 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR0_MASK=%#RX32\n", pVCpu->idCpu, u32CR0Mask));
3864
3865 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0);
3866 }
3867 return rc;
3868}
3869
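/*
 * Worked example of the fixed-CR0 handling in hmR0VmxLoadSharedCR0() above, using values that are
 * typical for VT-x capable CPUs (treat them as hypothetical here): IA32_VMX_CR0_FIXED0 = 0x80000021
 * (PG, NE and PE must be 1) and IA32_VMX_CR0_FIXED1 = 0xffffffff (no bit is forced to 0).
 *
 *   uSetCR0 = 0x80000021 & 0xffffffff = 0x80000021      // bits that must be set
 *   uZapCR0 = 0x80000021 | 0xffffffff = 0xffffffff      // bits that may be set
 *   u32GuestCR0 = (u32GuestCR0 | uSetCR0) & uZapCR0;
 *
 * With unrestricted guests PE and PG are dropped from uSetCR0, which is what allows running a
 * real-mode guest with CR0.PE = CR0.PG = 0.
 */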
3870
3871/**
3872 * Loads the guest control registers (CR3, CR4) into the guest-state area
3873 * in the VMCS.
3874 *
3875 * @returns VBox status code.
3876 * @param pVM Pointer to the VM.
3877 * @param pVCpu Pointer to the VMCPU.
3878 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3879 * out-of-sync. Make sure to update the required fields
3880 * before using them.
3881 *
3882 * @remarks No-long-jump zone!!!
3883 */
3884static int hmR0VmxLoadGuestCR3AndCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3885{
3886 int rc = VINF_SUCCESS;
3887 PVM pVM = pVCpu->CTX_SUFF(pVM);
3888
3889 /*
3890 * Guest CR2.
3891 * It's always loaded in the assembler code. Nothing to do here.
3892 */
3893
3894 /*
3895 * Guest CR3.
3896 */
3897 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3))
3898 {
3899 RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
3900 if (pVM->hm.s.fNestedPaging)
3901 {
3902 pVCpu->hm.s.vmx.HCPhysEPTP = PGMGetHyperCR3(pVCpu);
3903
3904 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
3905 Assert(pVCpu->hm.s.vmx.HCPhysEPTP);
3906 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & UINT64_C(0xfff0000000000000)));
3907 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & 0xfff));
3908
3909 /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
3910 pVCpu->hm.s.vmx.HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
3911 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
3912
3913 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
3914 AssertMsg( ((pVCpu->hm.s.vmx.HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
3915 && ((pVCpu->hm.s.vmx.HCPhysEPTP >> 6) & 0x3f) == 0, /* Bits 6:11 MBZ. */
3916 ("EPTP %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3917
3918 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
3919 AssertRCReturn(rc, rc);
3920 Log4(("Load[%RU32]: VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->idCpu, pVCpu->hm.s.vmx.HCPhysEPTP));
3921
3922 if ( pVM->hm.s.vmx.fUnrestrictedGuest
3923 || CPUMIsGuestPagingEnabledEx(pMixedCtx))
3924 {
3925 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
3926 if (CPUMIsGuestInPAEModeEx(pMixedCtx))
3927 {
3928 rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]); AssertRCReturn(rc, rc);
3929 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
3930 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
3931 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
3932 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
3933 }
3934
3935 /* The guest's view of its CR3 is unblemished with Nested Paging when the guest is using paging or we
3936 have Unrestricted Execution to handle the guest when it's not using paging. */
3937 GCPhysGuestCR3 = pMixedCtx->cr3;
3938 }
3939 else
3940 {
3941 /*
3942 * The guest is not using paging, but the CPU (VT-x) has to. While the guest thinks it accesses physical memory
3943 * directly, we use our identity-mapped page table to map guest-linear to guest-physical addresses.
3944 * EPT takes care of translating it to host-physical addresses.
3945 */
3946 RTGCPHYS GCPhys;
3947 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
3948 Assert(PDMVmmDevHeapIsEnabled(pVM));
3949
3950 /* We obtain it here every time as the guest could have relocated this PCI region. */
3951 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
3952 AssertRCReturn(rc, rc);
3953
3954 GCPhysGuestCR3 = GCPhys;
3955 }
3956
3957 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR3=%#RGv (GstN)\n", pVCpu->idCpu, GCPhysGuestCR3));
3958 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
3959 }
3960 else
3961 {
3962 /* Non-nested paging case, just use the hypervisor's CR3. */
3963 RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
3964
3965 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR3=%#RHv (HstN)\n", pVCpu->idCpu, HCPhysGuestCR3));
3966 rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3);
3967 }
3968 AssertRCReturn(rc, rc);
3969
3970 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3);
3971 }
3972
3973 /*
3974 * Guest CR4.
3975 */
3976 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4))
3977 {
3978 Assert(!(pMixedCtx->cr4 >> 32));
3979 uint32_t u32GuestCR4 = pMixedCtx->cr4;
3980
3981 /* The guest's view of its CR4 is unblemished. */
3982 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32GuestCR4);
3983 AssertRCReturn(rc, rc);
3984 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RX32\n", pVCpu->idCpu, u32GuestCR4));
3985
3986 /* Setup VT-x's view of the guest CR4. */
3987 /*
3988 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software interrupts to the 8086 program
3989 * interrupt handler. Clear the VME bit (the interrupt redirection bitmap is already all 0, see hmR3InitFinalizeR0())
3990 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
3991 */
3992 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3993 {
3994 Assert(pVM->hm.s.vmx.pRealModeTSS);
3995 Assert(PDMVmmDevHeapIsEnabled(pVM));
3996 u32GuestCR4 &= ~X86_CR4_VME;
3997 }
3998
3999 if (pVM->hm.s.fNestedPaging)
4000 {
4001 if ( !CPUMIsGuestPagingEnabledEx(pMixedCtx)
4002 && !pVM->hm.s.vmx.fUnrestrictedGuest)
4003 {
4004 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
4005 u32GuestCR4 |= X86_CR4_PSE;
4006 /* Our identity mapping is a 32-bit page directory. */
4007 u32GuestCR4 &= ~X86_CR4_PAE;
4008 }
4009 /* else use guest CR4.*/
4010 }
4011 else
4012 {
4013 /*
4014             * The shadow paging modes and guest paging modes are different; the shadow follows the host
4015             * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
4016 */
4017 switch (pVCpu->hm.s.enmShadowMode)
4018 {
4019 case PGMMODE_REAL: /* Real-mode. */
4020 case PGMMODE_PROTECTED: /* Protected mode without paging. */
4021 case PGMMODE_32_BIT: /* 32-bit paging. */
4022 {
4023 u32GuestCR4 &= ~X86_CR4_PAE;
4024 break;
4025 }
4026
4027 case PGMMODE_PAE: /* PAE paging. */
4028 case PGMMODE_PAE_NX: /* PAE paging with NX. */
4029 {
4030 u32GuestCR4 |= X86_CR4_PAE;
4031 break;
4032 }
4033
4034 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
4035 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
4036#ifdef VBOX_ENABLE_64_BITS_GUESTS
4037 break;
4038#endif
4039 default:
4040 AssertFailed();
4041 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
4042 }
4043 }
4044
4045 /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
4046 uint64_t uSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
4047 uint64_t uZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
4048 u32GuestCR4 |= uSetCR4;
4049 u32GuestCR4 &= uZapCR4;
4050
4051 /* Write VT-x's view of the guest CR4 into the VMCS. */
4052 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR4=%#RX32 (Set=%#RX32 Zap=%#RX32)\n", pVCpu->idCpu, u32GuestCR4, uSetCR4, uZapCR4));
4053 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCR4);
4054 AssertRCReturn(rc, rc);
4055
4056 /* Setup CR4 mask. CR4 flags owned by the host, if the guest attempts to change them, that would cause a VM-exit. */
4057 uint32_t u32CR4Mask = 0;
4058 u32CR4Mask = X86_CR4_VME
4059 | X86_CR4_PAE
4060 | X86_CR4_PGE
4061 | X86_CR4_PSE
4062 | X86_CR4_VMXE;
4063 pVCpu->hm.s.vmx.u32CR4Mask = u32CR4Mask;
4064 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32CR4Mask);
4065 AssertRCReturn(rc, rc);
4066
4067 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR4);
4068 }
4069 return rc;
4070}
4071
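/*
 * Worked example of the EPTP composed in the nested-paging path of hmR0VmxLoadGuestCR3AndCR4() above,
 * with a hypothetical EPT root-table address:
 *
 *   HCPhysEPTP  = 0x0000000012345000;                                   // 4K-aligned EPT PML4 table
 *   HCPhysEPTP |= VMX_EPT_MEMTYPE_WB                                    // memory type 6 (write-back)
 *              |  (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT); // 4-level walk, encoded as 3
 *   // => 0x000000001234501e, which satisfies the asserts above: bits 5:3 == 3, bits 11:6 == 0.
 */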
4072
4073/**
4074 * Loads the guest debug registers into the guest-state area in the VMCS.
4075 * This also sets up whether #DB and MOV DRx accesses cause VM-exits.
4076 *
4077 * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3).
4078 *
4079 * @returns VBox status code.
4080 * @param pVCpu Pointer to the VMCPU.
4081 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4082 * out-of-sync. Make sure to update the required fields
4083 * before using them.
4084 *
4085 * @remarks No-long-jump zone!!!
4086 */
4087static int hmR0VmxLoadSharedDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4088{
4089 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
4090 return VINF_SUCCESS;
4091
4092#ifdef VBOX_STRICT
4093 /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
4094 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
4095 {
4096 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
4097 Assert((pMixedCtx->dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0); /* Bits 63:32, 15, 14, 12, 11 are reserved. */
4098 Assert((pMixedCtx->dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK); /* Bit 10 is reserved (RA1). */
4099 }
4100#endif
4101
4102 int rc;
4103 PVM pVM = pVCpu->CTX_SUFF(pVM);
4104 bool fInterceptDB = false;
4105 bool fInterceptMovDRx = false;
4106 if ( pVCpu->hm.s.fSingleInstruction
4107 || DBGFIsStepping(pVCpu))
4108 {
4109 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
4110 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)
4111 {
4112 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
4113 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
4114 AssertRCReturn(rc, rc);
4115 Assert(fInterceptDB == false);
4116 }
4117 else
4118 {
4119 pMixedCtx->eflags.u32 |= X86_EFL_TF;
4120 pVCpu->hm.s.fClearTrapFlag = true;
4121 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
4122 fInterceptDB = true;
4123 }
4124 }
4125
4126 if ( fInterceptDB
4127 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
4128 {
4129 /*
4130 * Use the combined guest and host DRx values found in the hypervisor
4131 * register set because the debugger has breakpoints active or someone
4132 * is single stepping on the host side without a monitor trap flag.
4133 *
4134 * Note! DBGF expects a clean DR6 state before executing guest code.
4135 */
4136#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4137 if ( CPUMIsGuestInLongModeEx(pMixedCtx)
4138 && !CPUMIsHyperDebugStateActivePending(pVCpu))
4139 {
4140 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
4141 Assert(CPUMIsHyperDebugStateActivePending(pVCpu));
4142 Assert(!CPUMIsGuestDebugStateActivePending(pVCpu));
4143 }
4144 else
4145#endif
4146 if (!CPUMIsHyperDebugStateActive(pVCpu))
4147 {
4148 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
4149 Assert(CPUMIsHyperDebugStateActive(pVCpu));
4150 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
4151 }
4152
4153 /* Update DR7. (The other DRx values are handled by CPUM one way or the other.) */
4154 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)CPUMGetHyperDR7(pVCpu));
4155 AssertRCReturn(rc, rc);
4156
4157 pVCpu->hm.s.fUsingHyperDR7 = true;
4158 fInterceptDB = true;
4159 fInterceptMovDRx = true;
4160 }
4161 else
4162 {
4163 /*
4164 * If the guest has enabled debug registers, we need to load them prior to
4165 * executing guest code so they'll trigger at the right time.
4166 */
4167 if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
4168 {
4169#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4170 if ( CPUMIsGuestInLongModeEx(pMixedCtx)
4171 && !CPUMIsGuestDebugStateActivePending(pVCpu))
4172 {
4173 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
4174 Assert(CPUMIsGuestDebugStateActivePending(pVCpu));
4175 Assert(!CPUMIsHyperDebugStateActivePending(pVCpu));
4176 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
4177 }
4178 else
4179#endif
4180 if (!CPUMIsGuestDebugStateActive(pVCpu))
4181 {
4182 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
4183 Assert(CPUMIsGuestDebugStateActive(pVCpu));
4184 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
4185 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
4186 }
4187 Assert(!fInterceptDB);
4188 Assert(!fInterceptMovDRx);
4189 }
4190 /*
4191         * If no debugging is enabled, we'll lazily load DR0-3. Unlike on AMD-V, we
4192 * must intercept #DB in order to maintain a correct DR6 guest value.
4193 */
4194#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4195 else if ( !CPUMIsGuestDebugStateActivePending(pVCpu)
4196 && !CPUMIsGuestDebugStateActive(pVCpu))
4197#else
4198 else if (!CPUMIsGuestDebugStateActive(pVCpu))
4199#endif
4200 {
4201 fInterceptMovDRx = true;
4202 fInterceptDB = true;
4203 }
4204
4205 /* Update guest DR7. */
4206 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);
4207 AssertRCReturn(rc, rc);
4208
4209 pVCpu->hm.s.fUsingHyperDR7 = false;
4210 }
4211
4212 /*
4213 * Update the exception bitmap regarding intercepting #DB generated by the guest.
4214 */
4215 if (fInterceptDB)
4216 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_DB);
4217 else if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4218 {
4219#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
4220 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
4221#endif
4222 }
4223 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
4224 AssertRCReturn(rc, rc);
4225
4226 /*
4227 * Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions.
4228 */
4229 if (fInterceptMovDRx)
4230 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
4231 else
4232 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
4233 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
4234 AssertRCReturn(rc, rc);
4235
4236 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG);
4237 return VINF_SUCCESS;
4238}
4239
4240
4241#ifdef VBOX_STRICT
4242/**
4243 * Strict function to validate segment registers.
4244 *
4245 * @remarks ASSUMES CR0 is up to date.
4246 */
4247static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
4248{
4249 /* Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
4250 /* NOTE: The reason we check for attribute value 0 and not just the unusable bit here is because hmR0VmxWriteSegmentReg()
4251 * only updates the VMCS' copy of the value with the unusable bit and doesn't change the guest-context value. */
4252 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
4253 && ( !CPUMIsGuestInRealModeEx(pCtx)
4254 && !CPUMIsGuestInV86ModeEx(pCtx)))
4255 {
4256 /* Protected mode checks */
4257 /* CS */
4258 Assert(pCtx->cs.Attr.n.u1Present);
4259 Assert(!(pCtx->cs.Attr.u & 0xf00));
4260 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
4261 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
4262 || !(pCtx->cs.Attr.n.u1Granularity));
4263 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
4264 || (pCtx->cs.Attr.n.u1Granularity));
4265 /* CS cannot be loaded with NULL in protected mode. */
4266 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS?!? */
4267 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
4268 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
4269 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
4270 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
4271 else
4272            AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
4273 /* SS */
4274 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
4275 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
4276 if ( !(pCtx->cr0 & X86_CR0_PE)
4277 || pCtx->cs.Attr.n.u4Type == 3)
4278 {
4279 Assert(!pCtx->ss.Attr.n.u2Dpl);
4280 }
4281 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
4282 {
4283 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
4284 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
4285 Assert(pCtx->ss.Attr.n.u1Present);
4286 Assert(!(pCtx->ss.Attr.u & 0xf00));
4287 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
4288 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4289 || !(pCtx->ss.Attr.n.u1Granularity));
4290 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
4291 || (pCtx->ss.Attr.n.u1Granularity));
4292 }
4293 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
4294 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4295 {
4296 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4297 Assert(pCtx->ds.Attr.n.u1Present);
4298 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
4299 Assert(!(pCtx->ds.Attr.u & 0xf00));
4300 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
4301 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4302 || !(pCtx->ds.Attr.n.u1Granularity));
4303 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
4304 || (pCtx->ds.Attr.n.u1Granularity));
4305 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4306 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
4307 }
4308 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4309 {
4310 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4311 Assert(pCtx->es.Attr.n.u1Present);
4312 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
4313 Assert(!(pCtx->es.Attr.u & 0xf00));
4314 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
4315 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
4316 || !(pCtx->es.Attr.n.u1Granularity));
4317 Assert( !(pCtx->es.u32Limit & 0xfff00000)
4318 || (pCtx->es.Attr.n.u1Granularity));
4319 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4320 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
4321 }
4322 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
4323 {
4324 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4325 Assert(pCtx->fs.Attr.n.u1Present);
4326 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
4327 Assert(!(pCtx->fs.Attr.u & 0xf00));
4328 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
4329 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
4330 || !(pCtx->fs.Attr.n.u1Granularity));
4331 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
4332 || (pCtx->fs.Attr.n.u1Granularity));
4333 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4334 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
4335 }
4336 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
4337 {
4338 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4339 Assert(pCtx->gs.Attr.n.u1Present);
4340 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
4341 Assert(!(pCtx->gs.Attr.u & 0xf00));
4342 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
4343 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
4344 || !(pCtx->gs.Attr.n.u1Granularity));
4345 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
4346 || (pCtx->gs.Attr.n.u1Granularity));
4347 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4348 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
4349 }
4350 /* 64-bit capable CPUs. */
4351# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4352 if (HMVMX_IS_64BIT_HOST_MODE())
4353 {
4354 Assert(!(pCtx->cs.u64Base >> 32));
4355 Assert(!pCtx->ss.Attr.u || !(pCtx->ss.u64Base >> 32));
4356 Assert(!pCtx->ds.Attr.u || !(pCtx->ds.u64Base >> 32));
4357 Assert(!pCtx->es.Attr.u || !(pCtx->es.u64Base >> 32));
4358 }
4359# endif
4360 }
4361 else if ( CPUMIsGuestInV86ModeEx(pCtx)
4362 || ( CPUMIsGuestInRealModeEx(pCtx)
4363 && !pVM->hm.s.vmx.fUnrestrictedGuest))
4364 {
4365 /* Real and v86 mode checks. */
4366        /* hmR0VmxWriteSegmentReg() writes the modified attributes into the VMCS. We want what we're feeding to VT-x. */
4367 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
4368 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4369 {
4370 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3; u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
4371 }
4372 else
4373 {
4374 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
4375 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
4376 }
4377
4378 /* CS */
4379 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
4380 Assert(pCtx->cs.u32Limit == 0xffff);
4381 Assert(u32CSAttr == 0xf3);
4382 /* SS */
4383 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
4384 Assert(pCtx->ss.u32Limit == 0xffff);
4385 Assert(u32SSAttr == 0xf3);
4386 /* DS */
4387 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
4388 Assert(pCtx->ds.u32Limit == 0xffff);
4389 Assert(u32DSAttr == 0xf3);
4390 /* ES */
4391 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
4392 Assert(pCtx->es.u32Limit == 0xffff);
4393 Assert(u32ESAttr == 0xf3);
4394 /* FS */
4395 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
4396 Assert(pCtx->fs.u32Limit == 0xffff);
4397 Assert(u32FSAttr == 0xf3);
4398 /* GS */
4399 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
4400 Assert(pCtx->gs.u32Limit == 0xffff);
4401 Assert(u32GSAttr == 0xf3);
4402 /* 64-bit capable CPUs. */
4403# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4404 if (HMVMX_IS_64BIT_HOST_MODE())
4405 {
4406 Assert(!(pCtx->cs.u64Base >> 32));
4407 Assert(!u32SSAttr || !(pCtx->ss.u64Base >> 32));
4408 Assert(!u32DSAttr || !(pCtx->ds.u64Base >> 32));
4409 Assert(!u32ESAttr || !(pCtx->es.u64Base >> 32));
4410 }
4411# endif
4412 }
4413}
4414#endif /* VBOX_STRICT */
4415
4416
4417/**
4418 * Writes a guest segment register into the guest-state area in the VMCS.
4419 *
4420 * @returns VBox status code.
4421 * @param pVCpu Pointer to the VMCPU.
4422 * @param idxSel Index of the selector in the VMCS.
4423 * @param idxLimit Index of the segment limit in the VMCS.
4424 * @param idxBase Index of the segment base in the VMCS.
4425 * @param idxAccess Index of the access rights of the segment in the VMCS.
4426 * @param pSelReg Pointer to the segment selector.
4427 *
4428 * @remarks No-long-jump zone!!!
4429 */
4430static int hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
4431 uint32_t idxAccess, PCPUMSELREG pSelReg)
4432{
4433 int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); /* 16-bit guest selector field. */
4434 AssertRCReturn(rc, rc);
4435 rc = VMXWriteVmcs32(idxLimit, pSelReg->u32Limit); /* 32-bit guest segment limit field. */
4436 AssertRCReturn(rc, rc);
4437 rc = VMXWriteVmcsGstN(idxBase, pSelReg->u64Base); /* Natural width guest segment base field.*/
4438 AssertRCReturn(rc, rc);
4439
4440 uint32_t u32Access = pSelReg->Attr.u;
4441 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4442 {
4443 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
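        /* 0xf3 = present (bit 7), DPL=3 (bits 6:5), S=1 i.e. code/data (bit 4), type=3 i.e. accessed
           read/write data segment (bits 3:0) -- matching what the CPU itself uses in v86 mode. */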
4444 u32Access = 0xf3;
4445 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
4446 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
4447 }
4448 else
4449 {
4450 /*
4451 * The way to differentiate whether this is really a null selector or just a selector loaded with 0 in
4452 * real-mode is by using the segment attributes. A selector loaded in real-mode with the value 0 is valid and usable in
4453 * protected-mode and we should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure that NULL selectors
4454 * loaded in protected-mode have their attributes set to 0.
4455 */
4456 if (!u32Access)
4457 u32Access = X86DESCATTR_UNUSABLE;
4458 }
4459
4460 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
4461 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
4462 ("Access bit not set for usable segment. idx=%#x sel=%#x attr %#x\n", idxBase, pSelReg, pSelReg->Attr.u));
4463
4464 rc = VMXWriteVmcs32(idxAccess, u32Access); /* 32-bit guest segment access-rights field. */
4465 AssertRCReturn(rc, rc);
4466 return rc;
4467}
4468
4469
4470/**
4471 * Loads the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)
4472 * into the guest-state area in the VMCS.
4473 *
4474 * @returns VBox status code.
4476 * @param pVCpu Pointer to the VMCPU.
4477 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4478 * out-of-sync. Make sure to update the required fields
4479 * before using them.
4480 *
4481 * @remarks ASSUMES pMixedCtx->cr0 is up to date (strict builds validation).
4482 * @remarks No-long-jump zone!!!
4483 */
4484static int hmR0VmxLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4485{
4486 int rc = VERR_INTERNAL_ERROR_5;
4487 PVM pVM = pVCpu->CTX_SUFF(pVM);
4488
4489 /*
4490 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
4491 */
4492 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS))
4493 {
4494 /* Save the segment attributes for real-on-v86 mode hack, so we can restore them on VM-exit. */
4495 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4496 {
4497 pVCpu->hm.s.vmx.RealMode.AttrCS.u = pMixedCtx->cs.Attr.u;
4498 pVCpu->hm.s.vmx.RealMode.AttrSS.u = pMixedCtx->ss.Attr.u;
4499 pVCpu->hm.s.vmx.RealMode.AttrDS.u = pMixedCtx->ds.Attr.u;
4500 pVCpu->hm.s.vmx.RealMode.AttrES.u = pMixedCtx->es.Attr.u;
4501 pVCpu->hm.s.vmx.RealMode.AttrFS.u = pMixedCtx->fs.Attr.u;
4502 pVCpu->hm.s.vmx.RealMode.AttrGS.u = pMixedCtx->gs.Attr.u;
4503 }
4504
4505#ifdef VBOX_WITH_REM
4506 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
4507 {
4508 Assert(pVM->hm.s.vmx.pRealModeTSS);
4509 AssertCompile(PGMMODE_REAL < PGMMODE_PROTECTED);
4510 if ( pVCpu->hm.s.vmx.fWasInRealMode
4511 && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED)
4512 {
4513 /* Signal that the recompiler must flush its code-cache as the guest -may- rewrite code it will later execute
4514 in real-mode (e.g. OpenBSD 4.0) */
4515 REMFlushTBs(pVM);
4516 Log4(("Load[%RU32]: Switch to protected mode detected!\n", pVCpu->idCpu));
4517 pVCpu->hm.s.vmx.fWasInRealMode = false;
4518 }
4519 }
4520#endif
4521 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
4522 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs);
4523 AssertRCReturn(rc, rc);
4524 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
4525 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss);
4526 AssertRCReturn(rc, rc);
4527 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
4528 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds);
4529 AssertRCReturn(rc, rc);
4530 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
4531 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es);
4532 AssertRCReturn(rc, rc);
4533 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
4534 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs);
4535 AssertRCReturn(rc, rc);
4536 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
4537 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs);
4538 AssertRCReturn(rc, rc);
4539
4540#ifdef VBOX_STRICT
4541 /* Validate. */
4542 hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx);
4543#endif
4544
4545 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);
4546 Log4(("Load[%RU32]: CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pVCpu->idCpu, pMixedCtx->cs.Sel,
4547 pMixedCtx->cs.u64Base, pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u));
4548 }
4549
4550 /*
4551 * Guest TR.
4552 */
4553 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR))
4554 {
4555 /*
4556 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is achieved
4557 * using the interrupt redirection bitmap (all bits cleared to let the guest handle INT-n's) in the TSS.
4558 * See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
4559 */
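        /*
         * Note: when the real-on-v86 hack is active, the code below instead points TR at our own TSS on the
         * VMMDev heap (guest-physical address == address in real mode) and marks it busy, since VM-entry
         * requires TR to reference a busy TSS.
         */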
4560 uint16_t u16Sel = 0;
4561 uint32_t u32Limit = 0;
4562 uint64_t u64Base = 0;
4563 uint32_t u32AccessRights = 0;
4564
4565 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4566 {
4567 u16Sel = pMixedCtx->tr.Sel;
4568 u32Limit = pMixedCtx->tr.u32Limit;
4569 u64Base = pMixedCtx->tr.u64Base;
4570 u32AccessRights = pMixedCtx->tr.Attr.u;
4571 }
4572 else
4573 {
4574 Assert(pVM->hm.s.vmx.pRealModeTSS);
4575 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMR3CanExecuteGuest() -XXX- what about inner loop changes? */
4576
4577 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
4578 RTGCPHYS GCPhys;
4579 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
4580 AssertRCReturn(rc, rc);
4581
4582 X86DESCATTR DescAttr;
4583 DescAttr.u = 0;
4584 DescAttr.n.u1Present = 1;
4585 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
4586
4587 u16Sel = 0;
4588 u32Limit = HM_VTX_TSS_SIZE;
4589 u64Base = GCPhys; /* in real-mode phys = virt. */
4590 u32AccessRights = DescAttr.u;
4591 }
4592
4593 /* Validate. */
4594 Assert(!(u16Sel & RT_BIT(2)));
4595 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
4596 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
4597 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
4598 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
4599 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
4600 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
4601 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
4602 Assert( (u32Limit & 0xfff) == 0xfff
4603 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
4604 Assert( !(pMixedCtx->tr.u32Limit & 0xfff00000)
4605 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
4606
4607 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_TR, u16Sel); AssertRCReturn(rc, rc);
4608 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRCReturn(rc, rc);
4609 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRCReturn(rc, rc);
4610 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRCReturn(rc, rc);
4611
4612 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_TR);
4613 Log4(("Load[%RU32]: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", pVCpu->idCpu, u64Base));
4614 }
4615
4616 /*
4617 * Guest GDTR.
4618 */
4619 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR))
4620 {
4621 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt); AssertRCReturn(rc, rc);
4622 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt); AssertRCReturn(rc, rc);
4623
4624 /* Validate. */
4625 Assert(!(pMixedCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
4626
4627 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_GDTR);
4628 Log4(("Load[%RU32]: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->gdtr.pGdt));
4629 }
4630
4631 /*
4632 * Guest LDTR.
4633 */
4634 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR))
4635 {
4636 /* The unusable bit is specific to VT-x; if it's a null selector, mark it as an unusable segment. */
4637 uint32_t u32Access = 0;
4638 if (!pMixedCtx->ldtr.Attr.u)
4639 u32Access = X86DESCATTR_UNUSABLE;
4640 else
4641 u32Access = pMixedCtx->ldtr.Attr.u;
4642
4643 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR, pMixedCtx->ldtr.Sel); AssertRCReturn(rc, rc);
4644 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pMixedCtx->ldtr.u32Limit); AssertRCReturn(rc, rc);
4645 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pMixedCtx->ldtr.u64Base); AssertRCReturn(rc, rc);
4646 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRCReturn(rc, rc);
4647
4648 /* Validate. */
4649 if (!(u32Access & X86DESCATTR_UNUSABLE))
4650 {
4651 Assert(!(pMixedCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
4652 Assert(pMixedCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
4653 Assert(!pMixedCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
4654 Assert(pMixedCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
4655 Assert(!pMixedCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
4656 Assert(!(pMixedCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
4657 Assert( (pMixedCtx->ldtr.u32Limit & 0xfff) == 0xfff
4658 || !pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
4659 Assert( !(pMixedCtx->ldtr.u32Limit & 0xfff00000)
4660 || pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
4661 }
4662
4663 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LDTR);
4664 Log4(("Load[%RU32]: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->ldtr.u64Base));
4665 }
4666
4667 /*
4668 * Guest IDTR.
4669 */
4670 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR))
4671 {
4672 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt); AssertRCReturn(rc, rc);
4673 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt); AssertRCReturn(rc, rc);
4674
4675 /* Validate. */
4676 Assert(!(pMixedCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
4677
4678 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_IDTR);
4679 Log4(("Load[%RU32]: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->idtr.pIdt));
4680 }
4681
4682 return VINF_SUCCESS;
4683}
4684
4685
4686/**
4687 * Loads certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
4688 * areas. These MSRs will automatically be loaded to the host CPU on every
4689 * successful VM-entry and stored from the host CPU on every successful VM-exit.
4690 *
4691 * This also creates/updates MSR slots for the host MSRs. The actual host
4692 * MSR values are -not- updated here for performance reasons. See
4693 * hmR0VmxSaveHostMsrs().
4694 *
4695 * Also loads the sysenter MSRs into the guest-state area in the VMCS.
4696 *
4697 * @returns VBox status code.
4698 * @param pVCpu Pointer to the VMCPU.
4699 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4700 * out-of-sync. Make sure to update the required fields
4701 * before using them.
4702 *
4703 * @remarks No-long-jump zone!!!
4704 */
4705static int hmR0VmxLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4706{
4707 AssertPtr(pVCpu);
4708 AssertPtr(pVCpu->hm.s.vmx.pvGuestMsr);
4709
4710 /*
4711 * MSRs for which we use the auto-load/store MSR area in the VMCS.
4712 */
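    /* The auto-load/store area (pvGuestMsr) is an array of MSR entries (MSR index + 64-bit value) that the
       CPU loads on every VM-entry and stores back on every VM-exit; hmR0VmxAddAutoLoadStoreMsr() below adds
       or updates one slot per MSR. */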
4713 PVM pVM = pVCpu->CTX_SUFF(pVM);
4714 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS))
4715 {
4716 /* For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs(). */
4717#if HC_ARCH_BITS == 32 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4718 if (pVM->hm.s.fAllow64BitGuests)
4719 {
4720 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR, pMixedCtx->msrLSTAR, false /* fUpdateHostMsr */);
4721 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR, pMixedCtx->msrSTAR, false /* fUpdateHostMsr */);
4722 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK, pMixedCtx->msrSFMASK, false /* fUpdateHostMsr */);
4723 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false /* fUpdateHostMsr */);
4724# ifdef DEBUG
4725 PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
4726 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)
4727 {
4728 Log4(("Load[%RU32]: MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", pVCpu->idCpu, i, pMsr->u32Msr,
4729 pMsr->u64Value));
4730 }
4731# endif
4732 }
4733#endif
4734 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
4735 }
4736
4737 /*
4738 * Guest Sysenter MSRs.
4739 * These flags are only set when MSR-bitmaps are not supported by the CPU, in which case we
4740 * cause VM-exits on WRMSR to these MSRs.
4741 */
4742 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR))
4743 {
4744 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs); AssertRCReturn(rc, rc);
4745 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);
4746 }
4747
4748 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR))
4749 {
4750 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip); AssertRCReturn(rc, rc);
4751 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
4752 }
4753
4754 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR))
4755 {
4756 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp); AssertRCReturn(rc, rc);
4757 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
4758 }
4759
4760 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_EFER_MSR))
4761 {
4762 if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
4763 {
4764 /*
4765 * If the CPU supports VMCS controls for swapping EFER, use it. Otherwise, we have no option
4766 * but to use the auto-load store MSR area in the VMCS for swapping EFER. See @bugref{7368}.
4767 */
4768 if (pVM->hm.s.vmx.fSupportsVmcsEfer)
4769 {
4770 int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pMixedCtx->msrEFER);
4771 AssertRCReturn(rc,rc);
4772 Log4(("Load[%RU32]: VMX_VMCS64_GUEST_EFER_FULL=%#RX64\n", pVCpu->idCpu, pMixedCtx->msrEFER));
4773 }
4774 else
4775 {
4776 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pMixedCtx->msrEFER, false /* fUpdateHostMsr */);
4777 /* We need to intercept reads too, see @bugref{7386} comment #16. */
4778 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
4779 Log4(("Load[%RU32]: MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", pVCpu->idCpu, MSR_K6_EFER,
4780 pMixedCtx->msrEFER, pVCpu->hm.s.vmx.cMsrs));
4781 }
4782 }
4783 else if (!pVM->hm.s.vmx.fSupportsVmcsEfer)
4784 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K6_EFER);
4785 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
4786 }
4787
4788 return VINF_SUCCESS;
4789}
4790
4791
4792/**
4793 * Loads the guest activity state into the guest-state area in the VMCS.
4794 *
4795 * @returns VBox status code.
4796 * @param pVCpu Pointer to the VMCPU.
4797 * @param pCtx Pointer to the guest-CPU context. The data may be
4798 * out-of-sync. Make sure to update the required fields
4799 * before using them.
4800 *
4801 * @remarks No-long-jump zone!!!
4802 */
4803static int hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pCtx)
4804{
4805 NOREF(pCtx);
4806 /** @todo See if we can make use of other states, e.g.
4807 * VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT. */
4808 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE))
4809 {
4810 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
4811 AssertRCReturn(rc, rc);
4812
4813 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE);
4814 }
4815 return VINF_SUCCESS;
4816}
4817
4818
4819/**
4820 * Sets up the appropriate function to run guest code.
4821 *
4822 * @returns VBox status code.
4823 * @param pVCpu Pointer to the VMCPU.
4824 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4825 * out-of-sync. Make sure to update the required fields
4826 * before using them.
4827 *
4828 * @remarks No-long-jump zone!!!
4829 */
4830static int hmR0VmxSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4831{
4832 if (CPUMIsGuestInLongModeEx(pMixedCtx))
4833 {
4834#ifndef VBOX_ENABLE_64_BITS_GUESTS
4835 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
4836#endif
4837 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
4838#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4839 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
4840 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64)
4841 {
4842 if (pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
4843 {
4844 /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
4845 AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT
4846 | HM_CHANGED_VMX_EXIT_CTLS
4847 | HM_CHANGED_VMX_ENTRY_CTLS
4848 | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
4849 }
4850 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
4851 }
4852#else
4853 /* 64-bit host or hybrid host. */
4854 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
4855#endif
4856 }
4857 else
4858 {
4859 /* Guest is not in long mode, use the 32-bit handler. */
4860#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4861 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0StartVM32)
4862 {
4863 if (pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
4864 {
4865 /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
4866 AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT
4867 | HM_CHANGED_VMX_EXIT_CTLS
4868 | HM_CHANGED_VMX_ENTRY_CTLS
4869 | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
4870 }
4871 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4872 }
4873#else
4874 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4875#endif
4876 }
4877 Assert(pVCpu->hm.s.vmx.pfnStartVM);
4878 return VINF_SUCCESS;
4879}
4880
4881
4882/**
4883 * Wrapper for running the guest code in VT-x.
4884 *
4885 * @returns VBox strict status code.
4886 * @param pVM Pointer to the VM.
4887 * @param pVCpu Pointer to the VMCPU.
4888 * @param pCtx Pointer to the guest-CPU context.
4889 *
4890 * @remarks No-long-jump zone!!!
4891 */
4892DECLINLINE(int) hmR0VmxRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
4893{
4894 /*
4895 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
4896 * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved, hence the need for this XMM wrapper.
4897 * Refer to the MSDN docs. "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details.
4898 */
4899 bool const fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
4900 /** @todo Add stats for resume vs launch. */
4901#ifdef VBOX_WITH_KERNEL_USING_XMM
4902 return HMR0VMXStartVMWrapXMM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
4903#else
4904 return pVCpu->hm.s.vmx.pfnStartVM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
4905#endif
4906}
4907
4908
4909/**
4910 * Reports world-switch error and dumps some useful debug info.
4911 *
4912 * @param pVM Pointer to the VM.
4913 * @param pVCpu Pointer to the VMCPU.
4914 * @param rcVMRun The return code from VMLAUNCH/VMRESUME.
4915 * @param pCtx Pointer to the guest-CPU context.
4916 * @param pVmxTransient Pointer to the VMX transient structure (only
4917 * exitReason updated).
4918 */
4919static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
4920{
4921 Assert(pVM);
4922 Assert(pVCpu);
4923 Assert(pCtx);
4924 Assert(pVmxTransient);
4925 HMVMX_ASSERT_PREEMPT_SAFE();
4926
4927 Log4(("VM-entry failure: %Rrc\n", rcVMRun));
4928 switch (rcVMRun)
4929 {
4930 case VERR_VMX_INVALID_VMXON_PTR:
4931 AssertFailed();
4932 break;
4933 case VINF_SUCCESS: /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
4934 case VERR_VMX_UNABLE_TO_START_VM: /* VMLAUNCH/VMRESUME itself failed. */
4935 {
4936 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason);
4937 rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
4938 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
4939 AssertRC(rc);
4940
4941 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
4942 /* LastError.idCurrentCpu was already updated in hmR0VmxPreRunGuestCommitted().
4943 Cannot do it here as we may have been long preempted. */
4944
4945#ifdef VBOX_STRICT
4946 Log4(("uExitReason %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason,
4947 pVmxTransient->uExitReason));
4948 Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQualification));
4949 Log4(("InstrError %#RX32\n", pVCpu->hm.s.vmx.LastError.u32InstrError));
4950 if (pVCpu->hm.s.vmx.LastError.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
4951 Log4(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.LastError.u32InstrError]));
4952 else
4953 Log4(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
4954 Log4(("Entered host CPU %u\n", pVCpu->hm.s.vmx.LastError.idEnteredCpu));
4955 Log4(("Current host CPU %u\n", pVCpu->hm.s.vmx.LastError.idCurrentCpu));
4956
4957 /* VMX control bits. */
4958 uint32_t u32Val;
4959 uint64_t u64Val;
4960 HMVMXHCUINTREG uHCReg;
4961 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val); AssertRC(rc);
4962 Log4(("VMX_VMCS32_CTRL_PIN_EXEC %#RX32\n", u32Val));
4963 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val); AssertRC(rc);
4964 Log4(("VMX_VMCS32_CTRL_PROC_EXEC %#RX32\n", u32Val));
4965 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val); AssertRC(rc);
4966 Log4(("VMX_VMCS32_CTRL_PROC_EXEC2 %#RX32\n", u32Val));
4967 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val); AssertRC(rc);
4968 Log4(("VMX_VMCS32_CTRL_ENTRY %#RX32\n", u32Val));
4969 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val); AssertRC(rc);
4970 Log4(("VMX_VMCS32_CTRL_EXIT %#RX32\n", u32Val));
4971 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val); AssertRC(rc);
4972 Log4(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
4973 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32Val); AssertRC(rc);
4974 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
4975 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &u32Val); AssertRC(rc);
4976 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
4977 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &u32Val); AssertRC(rc);
4978 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
4979 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, &u32Val); AssertRC(rc);
4980 Log4(("VMX_VMCS32_CTRL_TPR_THRESHOLD %u\n", u32Val));
4981 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &u32Val); AssertRC(rc);
4982 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
4983 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
4984 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
4985 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
4986 Log4(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
4987 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val); AssertRC(rc);
4988 Log4(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
4989 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, &u32Val); AssertRC(rc);
4990 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
4991 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, &u32Val); AssertRC(rc);
4992 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
4993 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
4994 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
4995 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
4996 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
4997 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
4998 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
4999 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
5000 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
5001 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
5002 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
5003
5004 /* Guest bits. */
5005 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val); AssertRC(rc);
5006 Log4(("Old Guest Rip %#RX64 New %#RX64\n", pCtx->rip, u64Val));
5007 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val); AssertRC(rc);
5008 Log4(("Old Guest Rsp %#RX64 New %#RX64\n", pCtx->rsp, u64Val));
5009 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); AssertRC(rc);
5010 Log4(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
5011 rc = VMXReadVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, &u32Val); AssertRC(rc);
5012 Log4(("VMX_VMCS16_GUEST_FIELD_VPID %u\n", u32Val));
5013
5014 /* Host bits. */
5015 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR0, &uHCReg); AssertRC(rc);
5016 Log4(("Host CR0 %#RHr\n", uHCReg));
5017 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR3, &uHCReg); AssertRC(rc);
5018 Log4(("Host CR3 %#RHr\n", uHCReg));
5019 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR4, &uHCReg); AssertRC(rc);
5020 Log4(("Host CR4 %#RHr\n", uHCReg));
5021
5022 RTGDTR HostGdtr;
5023 PCX86DESCHC pDesc;
5024 ASMGetGDTR(&HostGdtr);
5025 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_CS, &u32Val); AssertRC(rc);
5026 Log4(("Host CS %#08x\n", u32Val));
5027 if (u32Val < HostGdtr.cbGdt)
5028 {
5029 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5030 HMR0DumpDescriptor(pDesc, u32Val, "CS: ");
5031 }
5032
5033 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_DS, &u32Val); AssertRC(rc);
5034 Log4(("Host DS %#08x\n", u32Val));
5035 if (u32Val < HostGdtr.cbGdt)
5036 {
5037 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5038 HMR0DumpDescriptor(pDesc, u32Val, "DS: ");
5039 }
5040
5041 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_ES, &u32Val); AssertRC(rc);
5042 Log4(("Host ES %#08x\n", u32Val));
5043 if (u32Val < HostGdtr.cbGdt)
5044 {
5045 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5046 HMR0DumpDescriptor(pDesc, u32Val, "ES: ");
5047 }
5048
5049 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_FS, &u32Val); AssertRC(rc);
5050 Log4(("Host FS %#08x\n", u32Val));
5051 if (u32Val < HostGdtr.cbGdt)
5052 {
5053 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5054 HMR0DumpDescriptor(pDesc, u32Val, "FS: ");
5055 }
5056
5057 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_GS, &u32Val); AssertRC(rc);
5058 Log4(("Host GS %#08x\n", u32Val));
5059 if (u32Val < HostGdtr.cbGdt)
5060 {
5061 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5062 HMR0DumpDescriptor(pDesc, u32Val, "GS: ");
5063 }
5064
5065 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_SS, &u32Val); AssertRC(rc);
5066 Log4(("Host SS %#08x\n", u32Val));
5067 if (u32Val < HostGdtr.cbGdt)
5068 {
5069 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5070 HMR0DumpDescriptor(pDesc, u32Val, "SS: ");
5071 }
5072
5073 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_TR, &u32Val); AssertRC(rc);
5074 Log4(("Host TR %#08x\n", u32Val));
5075 if (u32Val < HostGdtr.cbGdt)
5076 {
5077 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5078 HMR0DumpDescriptor(pDesc, u32Val, "TR: ");
5079 }
5080
5081 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_TR_BASE, &uHCReg); AssertRC(rc);
5082 Log4(("Host TR Base %#RHv\n", uHCReg));
5083 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, &uHCReg); AssertRC(rc);
5084 Log4(("Host GDTR Base %#RHv\n", uHCReg));
5085 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, &uHCReg); AssertRC(rc);
5086 Log4(("Host IDTR Base %#RHv\n", uHCReg));
5087 rc = VMXReadVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, &u32Val); AssertRC(rc);
5088 Log4(("Host SYSENTER CS %#08x\n", u32Val));
5089 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, &uHCReg); AssertRC(rc);
5090 Log4(("Host SYSENTER EIP %#RHv\n", uHCReg));
5091 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, &uHCReg); AssertRC(rc);
5092 Log4(("Host SYSENTER ESP %#RHv\n", uHCReg));
5093 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RSP, &uHCReg); AssertRC(rc);
5094 Log4(("Host RSP %#RHv\n", uHCReg));
5095 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg); AssertRC(rc);
5096 Log4(("Host RIP %#RHv\n", uHCReg));
5097# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
5098 if (HMVMX_IS_64BIT_HOST_MODE())
5099 {
5100 Log4(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
5101 Log4(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
5102 Log4(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
5103 Log4(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
5104 Log4(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
5105 Log4(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
5106 }
5107# endif
5108#endif /* VBOX_STRICT */
5109 break;
5110 }
5111
5112 default:
5113 /* Impossible */
5114 AssertMsgFailed(("hmR0VmxReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
5115 break;
5116 }
5117 NOREF(pVM); NOREF(pCtx);
5118}
5119
5120
5121#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
5122#ifndef VMX_USE_CACHED_VMCS_ACCESSES
5123# error "VMX_USE_CACHED_VMCS_ACCESSES not defined when it should be!"
5124#endif
5125#ifdef VBOX_STRICT
5126static bool hmR0VmxIsValidWriteField(uint32_t idxField)
5127{
5128 switch (idxField)
5129 {
5130 case VMX_VMCS_GUEST_RIP:
5131 case VMX_VMCS_GUEST_RSP:
5132 case VMX_VMCS_GUEST_SYSENTER_EIP:
5133 case VMX_VMCS_GUEST_SYSENTER_ESP:
5134 case VMX_VMCS_GUEST_GDTR_BASE:
5135 case VMX_VMCS_GUEST_IDTR_BASE:
5136 case VMX_VMCS_GUEST_CS_BASE:
5137 case VMX_VMCS_GUEST_DS_BASE:
5138 case VMX_VMCS_GUEST_ES_BASE:
5139 case VMX_VMCS_GUEST_FS_BASE:
5140 case VMX_VMCS_GUEST_GS_BASE:
5141 case VMX_VMCS_GUEST_SS_BASE:
5142 case VMX_VMCS_GUEST_LDTR_BASE:
5143 case VMX_VMCS_GUEST_TR_BASE:
5144 case VMX_VMCS_GUEST_CR3:
5145 return true;
5146 }
5147 return false;
5148}
5149
5150static bool hmR0VmxIsValidReadField(uint32_t idxField)
5151{
5152 switch (idxField)
5153 {
5154 /* Read-only fields. */
5155 case VMX_VMCS_RO_EXIT_QUALIFICATION:
5156 return true;
5157 }
5158 /* Remaining readable fields should also be writable. */
5159 return hmR0VmxIsValidWriteField(idxField);
5160}
5161#endif /* VBOX_STRICT */
5162
5163
5164/**
5165 * Executes the specified handler in 64-bit mode.
5166 *
5167 * @returns VBox status code.
5168 * @param pVM Pointer to the VM.
5169 * @param pVCpu Pointer to the VMCPU.
5170 * @param pCtx Pointer to the guest CPU context.
5171 * @param enmOp The operation to perform.
5172 * @param cbParam Number of parameters.
5173 * @param paParam Array of 32-bit parameters.
5174 */
5175VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
5176 uint32_t *paParam)
5177{
5178 int rc, rc2;
5179 PHMGLOBALCPUINFO pCpu;
5180 RTHCPHYS HCPhysCpuPage;
5181 RTCCUINTREG uOldEflags;
5182
5183 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
5184 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
5185 Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
5186 Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
5187
5188#ifdef VBOX_STRICT
5189 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries; i++)
5190 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i]));
5191
5192 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries; i++)
5193 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i]));
5194#endif
5195
5196 /* Disable interrupts. */
5197 uOldEflags = ASMIntDisableFlags();
5198
5199#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
5200 RTCPUID idHostCpu = RTMpCpuId();
5201 CPUMR0SetLApic(pVCpu, idHostCpu);
5202#endif
5203
5204 pCpu = HMR0GetCurrentCpu();
5205 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
5206
5207 /* Clear the VMCS, marking it inactive, clearing implementation-specific data and writing the VMCS data back to memory. */
5208 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
5209
5210 /* Leave VMX Root Mode. */
5211 VMXDisable();
5212
5213 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
5214
5215 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
5216 CPUMSetHyperEIP(pVCpu, enmOp);
5217 for (int i = (int)cbParam - 1; i >= 0; i--)
5218 CPUMPushHyper(pVCpu, paParam[i]);
5219
5220 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
5221
5222 /* Call the switcher. */
5223 rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
5224 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
5225
5226 /** @todo replace with hmR0VmxEnterRootMode() and hmR0VmxLeaveRootMode(). */
5227 /* Make sure the VMX instructions don't cause #UD faults. */
5228 ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);
5229
5230 /* Re-enter VMX Root Mode */
5231 rc2 = VMXEnable(HCPhysCpuPage);
5232 if (RT_FAILURE(rc2))
5233 {
5234 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
5235 ASMSetFlags(uOldEflags);
5236 return rc2;
5237 }
5238
5239 rc2 = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
5240 AssertRC(rc2);
5241 Assert(!(ASMGetFlags() & X86_EFL_IF));
5242 ASMSetFlags(uOldEflags);
5243 return rc;
5244}
5245
5246
5247/**
5248 * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts
5249 * supporting 64-bit guests.
5250 *
5251 * @returns VBox status code.
5252 * @param fResume Whether to VMLAUNCH or VMRESUME.
5253 * @param pCtx Pointer to the guest-CPU context.
5254 * @param pCache Pointer to the VMCS cache.
5255 * @param pVM Pointer to the VM.
5256 * @param pVCpu Pointer to the VMCPU.
5257 */
5258DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
5259{
5260 uint32_t aParam[6];
5261 PHMGLOBALCPUINFO pCpu = NULL;
5262 RTHCPHYS HCPhysCpuPage = 0;
5263 int rc = VERR_INTERNAL_ERROR_5;
5264
5265 pCpu = HMR0GetCurrentCpu();
5266 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
5267
5268#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5269 pCache->uPos = 1;
5270 pCache->interPD = PGMGetInterPaeCR3(pVM);
5271 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
5272#endif
5273
5274#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
5275 pCache->TestIn.HCPhysCpuPage = 0;
5276 pCache->TestIn.HCPhysVmcs = 0;
5277 pCache->TestIn.pCache = 0;
5278 pCache->TestOut.HCPhysVmcs = 0;
5279 pCache->TestOut.pCache = 0;
5280 pCache->TestOut.pCtx = 0;
5281 pCache->TestOut.eflags = 0;
5282#endif
5283
5284 aParam[0] = (uint32_t)(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */
5285 aParam[1] = (uint32_t)(HCPhysCpuPage >> 32); /* Param 1: VMXON physical address - Hi. */
5286 aParam[2] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */
5287 aParam[3] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs >> 32); /* Param 2: VMCS physical address - Hi. */
5288 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache);
5289 aParam[5] = 0;
5290
5291#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5292 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
5293 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
5294#endif
5295 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, 6, &aParam[0]);
5296
5297#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5298 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
5299 Assert(pCtx->dr[4] == 10);
5300 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
5301#endif
5302
5303#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
5304 AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
5305 AssertMsg(pCache->TestIn.HCPhysVmcs == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
5306 pVCpu->hm.s.vmx.HCPhysVmcs));
5307 AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
5308 pCache->TestOut.HCPhysVmcs));
5309 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
5310 pCache->TestOut.pCache));
5311 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache),
5312 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache)));
5313 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
5314 pCache->TestOut.pCtx));
5315 Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
5316#endif
5317 return rc;
5318}
5319
5320
5321/**
5322 * Initialize the VMCS-Read cache. The VMCS cache is used for 32-bit hosts
5323 * running 64-bit guests (except 32-bit Darwin which runs with 64-bit paging in
5324 * 32-bit mode) for 64-bit fields that cannot be accessed in 32-bit mode. Some
5325 * 64-bit fields -can- be accessed (those that have a 32-bit FULL & HIGH part).
5326 *
5327 * @returns VBox status code.
5328 * @param pVM Pointer to the VM.
5329 * @param pVCpu Pointer to the VMCPU.
5330 */
5331static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu)
5332{
5333#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField) \
5334{ \
5335 Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
5336 pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
5337 pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
5338 ++cReadFields; \
5339}
5340
5341 AssertPtr(pVM);
5342 AssertPtr(pVCpu);
5343 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
5344 uint32_t cReadFields = 0;
5345
5346 /*
5347 * Don't remove the #if 0'd fields in this code. They're listed here for consistency
5348 * and serve to indicate exceptions to the rules.
5349 */
5350
5351 /* Guest-natural selector base fields. */
5352#if 0
5353 /* These are 32-bit in practice. See Intel spec. 2.5 "Control Registers". */
5354 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR0);
5355 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR4);
5356#endif
5357 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_ES_BASE);
5358 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CS_BASE);
5359 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SS_BASE);
5360 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DS_BASE);
5361 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_FS_BASE);
5362 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GS_BASE);
5363 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_LDTR_BASE);
5364 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_TR_BASE);
5365 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GDTR_BASE);
5366 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_IDTR_BASE);
5367 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RSP);
5368 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RIP);
5369#if 0
5370 /* Unused natural width guest-state fields. */
5371 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS);
5372 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in Nested Paging case */
5373#endif
5374 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_ESP);
5375 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP);
5376
5377 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for these 64-bit fields (using "FULL" and "HIGH" fields). */
5378#if 0
5379 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL);
5380 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_DEBUGCTL_FULL);
5381 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PAT_FULL);
5382 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_EFER_FULL);
5383 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL);
5384 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE0_FULL);
5385 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE1_FULL);
5386 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE2_FULL);
5387 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE3_FULL);
5388#endif
5389
5390 /* Natural width guest-state fields. */
5391 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_QUALIFICATION);
5392#if 0
5393 /* Currently unused field. */
5394 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR);
5395#endif
5396
5397 if (pVM->hm.s.fNestedPaging)
5398 {
5399 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3);
5400 AssertMsg(cReadFields == VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields,
5401 VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX));
5402 pCache->Read.cValidEntries = VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX;
5403 }
5404 else
5405 {
5406 AssertMsg(cReadFields == VMX_VMCS_MAX_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields, VMX_VMCS_MAX_CACHE_IDX));
5407 pCache->Read.cValidEntries = VMX_VMCS_MAX_CACHE_IDX;
5408 }
5409
5410#undef VMXLOCAL_INIT_READ_CACHE_FIELD
5411 return VINF_SUCCESS;
5412}
5413
5414
5415/**
5416 * Writes a field into the VMCS. This can either directly invoke a VMWRITE or
5417 * queue up the VMWRITE by using the VMCS write cache (on 32-bit hosts, except
5418 * darwin, running 64-bit guests).
5419 *
5420 * @returns VBox status code.
5421 * @param pVCpu Pointer to the VMCPU.
5422 * @param idxField The VMCS field encoding.
5423 * @param u64Val 16, 32 or 64-bit value.
5424 */
5425VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5426{
5427 int rc;
5428 switch (idxField)
5429 {
5430 /*
5431 * These fields consist of a "FULL" and a "HIGH" part which can be written to individually.
5432 */
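        /* The "HIGH" half of each of these fields has the VMCS encoding of the "FULL" field + 1, which is why
           the two 32-bit VMWRITEs below (idxField and idxField + 1) cover the whole 64-bit value. */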
5433 /* 64-bit Control fields. */
5434 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
5435 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
5436 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
5437 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
5438 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
5439 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
5440 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
5441 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
5442 case VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL:
5443 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
5444 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
5445 case VMX_VMCS64_CTRL_EPTP_FULL:
5446 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
5447 /* 64-bit Guest-state fields. */
5448 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
5449 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
5450 case VMX_VMCS64_GUEST_PAT_FULL:
5451 case VMX_VMCS64_GUEST_EFER_FULL:
5452 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
5453 case VMX_VMCS64_GUEST_PDPTE0_FULL:
5454 case VMX_VMCS64_GUEST_PDPTE1_FULL:
5455 case VMX_VMCS64_GUEST_PDPTE2_FULL:
5456 case VMX_VMCS64_GUEST_PDPTE3_FULL:
5457 /* 64-bit Host-state fields. */
5458 case VMX_VMCS64_HOST_FIELD_PAT_FULL:
5459 case VMX_VMCS64_HOST_FIELD_EFER_FULL:
5460 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
5461 {
5462 rc = VMXWriteVmcs32(idxField, u64Val);
5463 rc |= VMXWriteVmcs32(idxField + 1, (uint32_t)(u64Val >> 32));
5464 break;
5465 }
5466
5467 /*
5468 * These fields do not have high and low parts. Queue up the VMWRITE by using the VMCS write-cache (for 64-bit
5469 * values). When we switch the host to 64-bit mode for running 64-bit guests, these VMWRITEs get executed then.
5470 */
5471 /* Natural-width Guest-state fields. */
5472 case VMX_VMCS_GUEST_CR3:
5473 case VMX_VMCS_GUEST_ES_BASE:
5474 case VMX_VMCS_GUEST_CS_BASE:
5475 case VMX_VMCS_GUEST_SS_BASE:
5476 case VMX_VMCS_GUEST_DS_BASE:
5477 case VMX_VMCS_GUEST_FS_BASE:
5478 case VMX_VMCS_GUEST_GS_BASE:
5479 case VMX_VMCS_GUEST_LDTR_BASE:
5480 case VMX_VMCS_GUEST_TR_BASE:
5481 case VMX_VMCS_GUEST_GDTR_BASE:
5482 case VMX_VMCS_GUEST_IDTR_BASE:
5483 case VMX_VMCS_GUEST_RSP:
5484 case VMX_VMCS_GUEST_RIP:
5485 case VMX_VMCS_GUEST_SYSENTER_ESP:
5486 case VMX_VMCS_GUEST_SYSENTER_EIP:
5487 {
5488 if (!(u64Val >> 32))
5489 {
5490 /* If this field is 64-bit, VT-x will zero out the top bits. */
5491 rc = VMXWriteVmcs32(idxField, (uint32_t)u64Val);
5492 }
5493 else
5494 {
5495 /* Assert that only the 32->64 switcher case should ever come here. */
5496 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);
5497 rc = VMXWriteCachedVmcsEx(pVCpu, idxField, u64Val);
5498 }
5499 break;
5500 }
5501
5502 default:
5503 {
5504 AssertMsgFailed(("VMXWriteVmcs64Ex: Invalid field %#RX32 (pVCpu=%p u64Val=%#RX64)\n", idxField, pVCpu, u64Val));
5505 rc = VERR_INVALID_PARAMETER;
5506 break;
5507 }
5508 }
5509 AssertRCReturn(rc, rc);
5510 return rc;
5511}
5512
5513
5514/**
5515 * Queue up a VMWRITE by using the VMCS write cache. This is only used on 32-bit
5516 * hosts (except darwin) for 64-bit guests.
5517 *
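 * @returns VBox status code.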
5518 * @param pVCpu Pointer to the VMCPU.
5519 * @param idxField The VMCS field encoding.
5520 * @param u64Val 16, 32 or 64-bit value.
5521 */
5522VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5523{
5524 AssertPtr(pVCpu);
5525 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
5526
5527 AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
5528 ("entries=%u\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
5529
5530 /* Make sure there are no duplicates. */
5531 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
5532 {
5533 if (pCache->Write.aField[i] == idxField)
5534 {
5535 pCache->Write.aFieldVal[i] = u64Val;
5536 return VINF_SUCCESS;
5537 }
5538 }
5539
5540 pCache->Write.aField[pCache->Write.cValidEntries] = idxField;
5541 pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
5542 pCache->Write.cValidEntries++;
5543 return VINF_SUCCESS;
5544}
5545
5546/* Enable later when the assembly code uses these as callbacks. */
5547#if 0
5548/*
5549 * Loads the VMCS write-cache into the CPU (by executing VMWRITEs).
5550 *
5551 * @param pVCpu Pointer to the VMCPU.
5552 * @param pCache Pointer to the VMCS cache.
5553 *
5554 * @remarks No-long-jump zone!!!
5555 */
5556VMMR0DECL(void) VMXWriteCachedVmcsLoad(PVMCPU pVCpu, PVMCSCACHE pCache)
5557{
5558 AssertPtr(pCache);
5559 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
5560 {
5561 int rc = VMXWriteVmcs64(pCache->Write.aField[i], pCache->Write.aFieldVal[i]);
5562 AssertRC(rc);
5563 }
5564 pCache->Write.cValidEntries = 0;
5565}
5566
5567
5568/**
5569 * Stores the VMCS read-cache from the CPU (by executing VMREADs).
5570 *
5571 * @param pVCpu Pointer to the VMCPU.
5572 * @param pCache Pointer to the VMCS cache.
5573 *
5574 * @remarks No-long-jump zone!!!
5575 */
5576VMMR0DECL(void) VMXReadCachedVmcsStore(PVMCPU pVCpu, PVMCSCACHE pCache)
5577{
5578 AssertPtr(pCache);
5579 for (uint32_t i = 0; i < pCache->Read.cValidEntries; i++)
5580 {
5581 int rc = VMXReadVmcs64(pCache->Read.aField[i], &pCache->Read.aFieldVal[i]);
5582 AssertRC(rc);
5583 }
5584}
5585#endif
5586#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
5587
5588
5589/**
5590 * Sets up the usage of TSC-offsetting and updates the VMCS. If offsetting is
5591 * not possible, causes VM-exits on RDTSC(P)s. Also sets up the VMX preemption
5592 * timer.
5593 *
5595 * @param pVCpu Pointer to the VMCPU.
5596 *
5597 * @remarks No-long-jump zone!!!
5598 */
5599static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu)
5600{
5601 int rc = VERR_INTERNAL_ERROR_5;
5602 bool fOffsettedTsc = false;
5603 bool fParavirtTsc = false;
5604 PVM pVM = pVCpu->CTX_SUFF(pVM);
5605 if (pVM->hm.s.vmx.fUsePreemptTimer)
5606 {
5607 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &fParavirtTsc,
5608 &pVCpu->hm.s.vmx.u64TSCOffset);
5609
5610 /* Make sure the returned values have sane upper and lower boundaries. */
5611 uint64_t u64CpuHz = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
5612 cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64th of a second */
5613 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
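        /* The VMX preemption timer counts down at the TSC rate divided by 2^cPreemptTimerShift (a CPU
           capability reported via the VMX misc. MSR), so convert the deadline from TSC ticks accordingly
           before programming the timer value below. */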
5614 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
5615
5616 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
5617 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount); AssertRC(rc);
5618 }
5619 else
5620 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset, &fParavirtTsc);
5621
5622 if (fParavirtTsc)
5623 {
5624 rc = GIMR0UpdateParavirtTsc(pVM, 0 /* u64Offset */);
5625 AssertRC(rc);
5626 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
5627 }
5628
5629 if (fOffsettedTsc)
5630 {
5631 uint64_t u64CurTSC = ASMReadTSC();
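            /* With TSC-offsetting the guest reads (host TSC + offset). Only use the offset if the resulting
               value would not go backwards compared to the last TSC value the guest has seen; otherwise
               intercept RDTSC(P) instead. */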
5632 if (u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
5633 {
5634 /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
5635 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset); AssertRC(rc);
5636
5637 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5638 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
5639 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
5640 }
5641 else
5642 {
5643 /* VM-exit on RDTSC(P) as we would otherwise pass decreasing TSC values to the guest. */
5644 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5645 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
5646 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow);
5647 }
5648 }
5649 else
5650 {
5651 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
5652 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5653 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
5654 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
5655 }
5656}
5657
5658
5659/**
5660 * Determines if an exception is a contributory exception. Contributory
5661 * exceptions are ones which can cause double-faults. Page-fault is
5662 * intentionally not included here as it's a conditional contributory exception.
5663 *
5664 * @returns true if the exception is contributory, false otherwise.
5665 * @param uVector The exception vector.
5666 */
5667DECLINLINE(bool) hmR0VmxIsContributoryXcpt(const uint32_t uVector)
5668{
5669 switch (uVector)
5670 {
5671 case X86_XCPT_GP:
5672 case X86_XCPT_SS:
5673 case X86_XCPT_NP:
5674 case X86_XCPT_TS:
5675 case X86_XCPT_DE:
5676 return true;
5677 default:
5678 break;
5679 }
5680 return false;
5681}
5682
5683
5684/**
5685 * Sets an event as a pending event to be injected into the guest.
5686 *
5687 * @param pVCpu Pointer to the VMCPU.
5688 * @param u32IntInfo The VM-entry interruption-information field.
5689 * @param cbInstr The VM-entry instruction length in bytes (for software
5690 * interrupts, exceptions and privileged software
5691 * exceptions).
5692 * @param u32ErrCode The VM-entry exception error code.
5693 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
5694 * page-fault.
5695 *
5696 * @remarks Statistics counter assumes this is a guest event being injected or
5697 * re-injected into the guest, i.e. 'StatInjectPendingReflect' is
5698 * always incremented.
5699 */
5700DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
5701 RTGCUINTPTR GCPtrFaultAddress)
5702{
5703 Assert(!pVCpu->hm.s.Event.fPending);
5704 pVCpu->hm.s.Event.fPending = true;
5705 pVCpu->hm.s.Event.u64IntInfo = u32IntInfo;
5706 pVCpu->hm.s.Event.u32ErrCode = u32ErrCode;
5707 pVCpu->hm.s.Event.cbInstr = cbInstr;
5708 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
5709
5710 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
5711}
5712
5713
5714/**
5715 * Sets a double-fault (#DF) exception as pending-for-injection into the VM.
5716 *
5717 * @param pVCpu Pointer to the VMCPU.
5718 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5719 * out-of-sync. Make sure to update the required fields
5720 * before using them.
5721 */
5722DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5723{
5724 NOREF(pMixedCtx);
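    /* VM-entry interruption-information layout: bits 7:0 = vector (#DF here), bits 10:8 = type (hardware
       exception), bit 11 = deliver error code (the #DF error code is always 0), bit 31 = valid. */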
5725 uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
5726 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5727 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
5728 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
5729}
5730
5731
5732/**
5733 * Handles a condition that occurred while delivering an event through the guest
5734 * IDT.
5735 *
5736 * @returns VBox status code (informational error codes included).
5737 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5738 * @retval VINF_HM_DOUBLE_FAULT if a #DF condition was detected and we ought to
5739 * continue execution of the guest which will deliver the #DF.
5740 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5741 *
5742 * @param pVCpu Pointer to the VMCPU.
5743 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5744 * out-of-sync. Make sure to update the required fields
5745 * before using them.
5746 * @param pVmxTransient Pointer to the VMX transient structure.
5747 *
5748 * @remarks No-long-jump zone!!!
5749 */
5750static int hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
5751{
5752 uint32_t uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
5753
5754 int rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
5755 AssertRCReturn(rc, rc);
5756 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
5757 {
5758 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
5759 AssertRCReturn(rc, rc);
5760
5761 uint32_t uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
5762 uint32_t uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
5763
5764 typedef enum
5765 {
5766 VMXREFLECTXCPT_XCPT, /* Reflect the exception to the guest or for further evaluation by VMM. */
5767 VMXREFLECTXCPT_DF, /* Reflect the exception as a double-fault to the guest. */
5768 VMXREFLECTXCPT_TF, /* Indicate a triple faulted state to the VMM. */
5769 VMXREFLECTXCPT_NONE /* Nothing to reflect. */
5770 } VMXREFLECTXCPT;
5771
5772 /* See Intel spec. 30.7.1.1 "Reflecting Exceptions to Guest Software". */
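        /* In short (mirroring the logic below): #PF during delivery of #PF -> vectoring double #PF; a
           contributory exception during delivery of a contributory exception (or of #PF) -> raise #DF;
           anything during delivery of #DF -> triple fault; an external interrupt or NMI being delivered ->
           simply reflect the new exception. */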
5773 VMXREFLECTXCPT enmReflect = VMXREFLECTXCPT_NONE;
5774 if (VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo))
5775 {
5776 if (uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT)
5777 {
5778 enmReflect = VMXREFLECTXCPT_XCPT;
5779#ifdef VBOX_STRICT
5780 if ( hmR0VmxIsContributoryXcpt(uIdtVector)
5781 && uExitVector == X86_XCPT_PF)
5782 {
5783 Log4(("IDT: vcpu[%RU32] Contributory #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
5784 }
5785#endif
5786 if ( uExitVector == X86_XCPT_PF
5787 && uIdtVector == X86_XCPT_PF)
5788 {
5789 pVmxTransient->fVectoringDoublePF = true;
5790 Log4(("IDT: vcpu[%RU32] Vectoring Double #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
5791 }
5792 else if ( (pVCpu->hm.s.vmx.u32XcptBitmap & HMVMX_CONTRIBUTORY_XCPT_MASK)
5793 && hmR0VmxIsContributoryXcpt(uExitVector)
5794 && ( hmR0VmxIsContributoryXcpt(uIdtVector)
5795 || uIdtVector == X86_XCPT_PF))
5796 {
5797 enmReflect = VMXREFLECTXCPT_DF;
5798 }
5799 else if (uIdtVector == X86_XCPT_DF)
5800 enmReflect = VMXREFLECTXCPT_TF;
5801 }
5802 else if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT
5803 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI)
5804 {
5805 /*
5806 * Ignore software interrupts (INT n), software exceptions (#BP, #OF) and
5807 * privileged software exceptions (#DB from ICEBP) as they reoccur when restarting the instruction.
5808 */
5809 enmReflect = VMXREFLECTXCPT_XCPT;
5810
5811 if (uExitVector == X86_XCPT_PF)
5812 {
5813 pVmxTransient->fVectoringPF = true;
5814 Log4(("IDT: vcpu[%RU32] Vectoring #PF due to Ext-Int/NMI. uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
5815 }
5816 }
5817 }
5818 else if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5819 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT
5820 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI)
5821 {
5822 /*
5823 * If event delivery caused an EPT violation/misconfig or APIC access VM-exit, then the VM-exit
5824 * interruption-information will not be valid as it's not an exception and we end up here. In such cases,
5825 * it is sufficient to reflect the original exception to the guest after handling the VM-exit.
5826 */
5827 enmReflect = VMXREFLECTXCPT_XCPT;
5828 }
5829
5830 /*
5831 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig etc.) occurred
5832 * while delivering the NMI, we need to clear the block-by-NMI field in the guest interruptibility-state before
5833 * re-delivering the NMI after handling the VM-exit. Otherwise the subsequent VM-entry would fail.
5834 *
5835 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception". See @bugref{7445}.
5836 */
5837 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5838 && enmReflect == VMXREFLECTXCPT_XCPT
5839 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
5840 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
5841 {
5842 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
5843 }
5844
5845 switch (enmReflect)
5846 {
5847 case VMXREFLECTXCPT_XCPT:
5848 {
5849 Assert( uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5850 && uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5851 && uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT);
5852
5853 uint32_t u32ErrCode = 0;
5854 if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo))
5855 {
5856 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
5857 AssertRCReturn(rc, rc);
5858 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5859 }
5860
5861 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF. See hmR0VmxExitXcptPF(). */
5862 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
5863 0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2);
5864 rc = VINF_SUCCESS;
5865 Log4(("IDT: vcpu[%RU32] Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->idCpu,
5866 pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.u32ErrCode));
5867
5868 break;
5869 }
5870
5871 case VMXREFLECTXCPT_DF:
5872 {
5873 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
5874 rc = VINF_HM_DOUBLE_FAULT;
5875 Log4(("IDT: vcpu[%RU32] Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->idCpu,
5876 pVCpu->hm.s.Event.u64IntInfo, uIdtVector, uExitVector));
5877
5878 break;
5879 }
5880
5881 case VMXREFLECTXCPT_TF:
5882 {
5883 rc = VINF_EM_RESET;
5884 Log4(("IDT: vcpu[%RU32] Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", pVCpu->idCpu, uIdtVector,
5885 uExitVector));
5886 break;
5887 }
5888
5889 default:
5890 Assert(rc == VINF_SUCCESS);
5891 break;
5892 }
5893 }
5894 else if ( VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK_IRET(pVmxTransient->uExitIntInfo)
5895 && uExitVector != X86_XCPT_DF
5896 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI))
5897 {
5898 /*
5899         * Execution of IRET caused this fault when NMI blocking was in effect (i.e. we're in the guest NMI handler).
5900 * We need to set the block-by-NMI field so that NMIs remain blocked until the IRET execution is restarted.
5901 * See Intel spec. 30.7.1.2 "Resuming guest software after handling an exception".
5902 */
5903 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
5904 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5905 }
5906
5907 Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET);
5908 return rc;
5909}
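/*
 * A standalone illustrative sketch of the reflection decision made by the function
 * above: given the vector that was being delivered (IDT-vectoring info) and the vector
 * that caused the new VM-exit, decide whether to re-deliver, raise a double fault, or
 * report a triple fault.  Helper and enum names here are hypothetical, not VBox APIs;
 * the contributory set (#DE, #TS, #NP, #SS, #GP) follows the Intel SDM.
 */
#include <stdbool.h>
#include <stdint.h>

typedef enum { REFLECT_XCPT, REFLECT_DF, REFLECT_TF } XCPTREFLECT;

static bool IsContributoryXcpt(uint8_t uVector)
{
    return uVector == 0    /* #DE */
        || uVector == 10   /* #TS */
        || uVector == 11   /* #NP */
        || uVector == 12   /* #SS */
        || uVector == 13;  /* #GP */
}

static XCPTREFLECT ClassifyXcptPair(uint8_t uIdtVector, uint8_t uExitVector)
{
    if (uIdtVector == 8)   /* #DF was already being delivered -> triple fault. */
        return REFLECT_TF;
    if (   IsContributoryXcpt(uExitVector)
        && (IsContributoryXcpt(uIdtVector) || uIdtVector == 14 /* #PF */))
        return REFLECT_DF;
    /* Note: the function above additionally requires the contributory exceptions to be
       intercepted (u32XcptBitmap) and defers the #PF-during-#PF case to the #PF handler
       via fVectoringDoublePF instead of raising the #DF here. */
    return REFLECT_XCPT;
}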
5910
5911
5912/**
5913 * Saves the guest's CR0 register from the VMCS into the guest-CPU context.
5914 *
5915 * @returns VBox status code.
5916 * @param pVCpu Pointer to the VMCPU.
5917 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5918 * out-of-sync. Make sure to update the required fields
5919 * before using them.
5920 *
5921 * @remarks No-long-jump zone!!!
5922 */
5923static int hmR0VmxSaveGuestCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5924{
5925 NOREF(pMixedCtx);
5926
5927 /*
5928 * While in the middle of saving guest-CR0, we could get preempted and re-invoked from the preemption hook,
5929 * see hmR0VmxLeave(). Safer to just make this code non-preemptible.
5930 */
5931 VMMRZCallRing3Disable(pVCpu);
5932 HM_DISABLE_PREEMPT_IF_NEEDED();
5933
5934 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0))
5935 {
5936 uint32_t uVal = 0;
5937 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uVal);
5938 AssertRCReturn(rc, rc);
5939
5940 uint32_t uShadow = 0;
5941 rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow);
5942 AssertRCReturn(rc, rc);
5943
5944 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR0Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR0Mask);
5945 CPUMSetGuestCR0(pVCpu, uVal);
5946 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0);
5947 }
5948
5949 HM_RESTORE_PREEMPT_IF_NEEDED();
5950 VMMRZCallRing3Enable(pVCpu);
5951 return VINF_SUCCESS;
5952}
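/*
 * A standalone illustrative sketch of the bit-merge performed above when saving CR0:
 * bits owned by the host (set in the CR0 guest/host mask) are taken from the read
 * shadow, all other bits from the real guest CR0 field in the VMCS.  The helper name
 * is hypothetical, not a VBox API.
 */
#include <stdint.h>

static uint32_t MergeShadowedCr0(uint32_t uGuestCr0, uint32_t uReadShadow, uint32_t uCr0Mask)
{
    /* Host-owned bits come from the read shadow, guest-owned bits from the guest field. */
    return (uReadShadow & uCr0Mask) | (uGuestCr0 & ~uCr0Mask);
}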
5953
5954
5955/**
5956 * Saves the guest's CR4 register from the VMCS into the guest-CPU context.
5957 *
5958 * @returns VBox status code.
5959 * @param pVCpu Pointer to the VMCPU.
5960 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5961 * out-of-sync. Make sure to update the required fields
5962 * before using them.
5963 *
5964 * @remarks No-long-jump zone!!!
5965 */
5966static int hmR0VmxSaveGuestCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5967{
5968 NOREF(pMixedCtx);
5969
5970 int rc = VINF_SUCCESS;
5971 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4))
5972 {
5973 uint32_t uVal = 0;
5974 uint32_t uShadow = 0;
5975 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uVal);
5976 AssertRCReturn(rc, rc);
5977 rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow);
5978 AssertRCReturn(rc, rc);
5979
5980 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR4Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR4Mask);
5981 CPUMSetGuestCR4(pVCpu, uVal);
5982 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4);
5983 }
5984 return rc;
5985}
5986
5987
5988/**
5989 * Saves the guest's RIP register from the VMCS into the guest-CPU context.
5990 *
5991 * @returns VBox status code.
5992 * @param pVCpu Pointer to the VMCPU.
5993 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5994 * out-of-sync. Make sure to update the required fields
5995 * before using them.
5996 *
5997 * @remarks No-long-jump zone!!!
5998 */
5999static int hmR0VmxSaveGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6000{
6001 int rc = VINF_SUCCESS;
6002 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP))
6003 {
6004 uint64_t u64Val = 0;
6005 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);
6006 AssertRCReturn(rc, rc);
6007
6008 pMixedCtx->rip = u64Val;
6009 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP);
6010 }
6011 return rc;
6012}
6013
6014
6015/**
6016 * Saves the guest's RSP register from the VMCS into the guest-CPU context.
6017 *
6018 * @returns VBox status code.
6019 * @param pVCpu Pointer to the VMCPU.
6020 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
6021 * out-of-sync. Make sure to update the required fields
6022 * before using them.
6023 *
6024 * @remarks No-long-jump zone!!!
6025 */
6026static int hmR0VmxSaveGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6027{
6028 int rc = VINF_SUCCESS;
6029 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RSP))
6030 {
6031 uint64_t u64Val = 0;
6032 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);
6033 AssertRCReturn(rc, rc);
6034
6035 pMixedCtx->rsp = u64Val;
6036 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RSP);
6037 }
6038 return rc;
6039}
6040
6041
6042/**
6043 * Saves the guest's RFLAGS from the VMCS into the guest-CPU context.
6044 *
6045 * @returns VBox status code.
6046 * @param pVCpu Pointer to the VMCPU.
6047 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
6048 * out-of-sync. Make sure to update the required fields
6049 * before using them.
6050 *
6051 * @remarks No-long-jump zone!!!
6052 */
6053static int hmR0VmxSaveGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6054{
6055 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS))
6056 {
6057 uint32_t uVal = 0;
6058 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &uVal);
6059 AssertRCReturn(rc, rc);
6060
6061 pMixedCtx->eflags.u32 = uVal;
6062 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) /* Undo our real-on-v86-mode changes to eflags if necessary. */
6063 {
6064 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
6065 Log4(("Saving real-mode EFLAGS VT-x view=%#RX32\n", pMixedCtx->eflags.u32));
6066
6067 pMixedCtx->eflags.Bits.u1VM = 0;
6068 pMixedCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL;
6069 }
6070
6071 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS);
6072 }
6073 return VINF_SUCCESS;
6074}
6075
6076
6077/**
6078 * Wrapper for saving the guest's RIP, RSP and RFLAGS from the VMCS into the
6079 * guest-CPU context.
6080 */
6081DECLINLINE(int) hmR0VmxSaveGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6082{
6083 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
6084 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
6085 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
6086 return rc;
6087}
6088
6089
6090/**
6091 * Saves the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
6092 * from the guest-state area in the VMCS.
6093 *
6094 * @param pVCpu Pointer to the VMCPU.
6095 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
6096 * out-of-sync. Make sure to update the required fields
6097 * before using them.
6098 *
6099 * @remarks No-long-jump zone!!!
6100 */
6101static void hmR0VmxSaveGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6102{
6103 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_INTR_STATE))
6104 {
6105 uint32_t uIntrState = 0;
6106 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
6107 AssertRC(rc);
6108
6109 if (!uIntrState)
6110 {
6111 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
6112 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6113
6114 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6115 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
6116 }
6117 else
6118 {
6119 if (uIntrState & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS
6120 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI))
6121 {
6122 rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
6123 AssertRC(rc);
6124 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* for hmR0VmxGetGuestIntrState(). */
6125 AssertRC(rc);
6126
6127 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
6128 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
6129 }
6130 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
6131 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6132
6133 if (uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI)
6134 {
6135 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6136 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
6137 }
6138 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6139 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
6140 }
6141
6142 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_INTR_STATE);
6143 }
6144}
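/*
 * A standalone illustrative sketch of the interruptibility-state field decoded above.
 * The bit positions are architectural (Intel SDM, guest interruptibility state in the
 * guest non-register state area); the macro and helper names are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

#define INTR_STATE_BLOCK_STI    (1u << 0)   /* Blocking by STI. */
#define INTR_STATE_BLOCK_MOVSS  (1u << 1)   /* Blocking by MOV SS / POP SS. */
#define INTR_STATE_BLOCK_SMI    (1u << 2)   /* Blocking by SMI. */
#define INTR_STATE_BLOCK_NMI    (1u << 3)   /* Blocking by NMI. */

static bool IsInterruptShadowActive(uint32_t uIntrState)
{
    /* The "interrupt shadow" the comment above refers to is STI or MOV-SS blocking. */
    return (uIntrState & (INTR_STATE_BLOCK_STI | INTR_STATE_BLOCK_MOVSS)) != 0;
}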
6145
6146
6147/**
6148 * Saves the guest's activity state.
6149 *
6150 * @returns VBox status code.
6151 * @param pVCpu Pointer to the VMCPU.
6152 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
6153 * out-of-sync. Make sure to update the required fields
6154 * before using them.
6155 *
6156 * @remarks No-long-jump zone!!!
6157 */
6158static int hmR0VmxSaveGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6159{
6160 NOREF(pMixedCtx);
6161 /* Nothing to do for now until we make use of different guest-CPU activity state. Just update the flag. */
6162 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_ACTIVITY_STATE);
6163 return VINF_SUCCESS;
6164}
6165
6166
6167/**
6168 * Saves the guest SYSENTER MSRs (SYSENTER_CS, SYSENTER_EIP, SYSENTER_ESP) from
6169 * the current VMCS into the guest-CPU context.
6170 *
6171 * @returns VBox status code.
6172 * @param pVCpu Pointer to the VMCPU.
6173 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
6174 * out-of-sync. Make sure to update the required fields
6175 * before using them.
6176 *
6177 * @remarks No-long-jump zone!!!
6178 */
6179static int hmR0VmxSaveGuestSysenterMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6180{
6181 int rc = VINF_SUCCESS;
6182 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR))
6183 {
6184 uint32_t u32Val = 0;
6185 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRCReturn(rc, rc);
6186 pMixedCtx->SysEnter.cs = u32Val;
6187 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR);
6188 }
6189
6190 uint64_t u64Val = 0;
6191 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR))
6192 {
6193 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &u64Val); AssertRCReturn(rc, rc);
6194 pMixedCtx->SysEnter.eip = u64Val;
6195 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR);
6196 }
6197 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR))
6198 {
6199 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &u64Val); AssertRCReturn(rc, rc);
6200 pMixedCtx->SysEnter.esp = u64Val;
6201 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR);
6202 }
6203 return rc;
6204}
6205
6206
6207/**
6208 * Saves the set of guest MSRs (that we restore lazily while leaving VT-x) from
6209 * the CPU back into the guest-CPU context.
6210 *
6211 * @returns VBox status code.
6212 * @param pVCpu Pointer to the VMCPU.
6213 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
6214 * out-of-sync. Make sure to update the required fields
6215 * before using them.
6216 *
6217 * @remarks No-long-jump zone!!!
6218 */
6219static int hmR0VmxSaveGuestLazyMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6220{
6221#if HC_ARCH_BITS == 64
6222 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
6223 {
6224 /* Since this can be called from our preemption hook it's safer to make the guest-MSRs update non-preemptible. */
6225 VMMRZCallRing3Disable(pVCpu);
6226 HM_DISABLE_PREEMPT_IF_NEEDED();
6227
6228 /* Doing the check here ensures we don't overwrite already-saved guest MSRs from a preemption hook. */
6229 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS))
6230 {
6231 hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx);
6232 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
6233 }
6234
6235 HM_RESTORE_PREEMPT_IF_NEEDED();
6236 VMMRZCallRing3Enable(pVCpu);
6237 }
6238 else
6239 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
6240#else
6241 NOREF(pMixedCtx);
6242 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
6243#endif
6244
6245 return VINF_SUCCESS;
6246}
6247
6248
6249/**
6250 * Saves the auto load/store'd guest MSRs from the current VMCS into
6251 * the guest-CPU context.
6252 *
6253 * @returns VBox status code.
6254 * @param pVCpu Pointer to the VMCPU.
6255 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
6256 * out-of-sync. Make sure to update the required fields
6257 * before using them.
6258 *
6259 * @remarks No-long-jump zone!!!
6260 */
6261static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6262{
6263 if (HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS))
6264 return VINF_SUCCESS;
6265
6266 PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
6267 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
6268 Log4(("hmR0VmxSaveGuestAutoLoadStoreMsrs: cMsrs=%u\n", cMsrs));
6269 for (uint32_t i = 0; i < cMsrs; i++, pMsr++)
6270 {
6271 switch (pMsr->u32Msr)
6272 {
6273 case MSR_K8_TSC_AUX: CPUMR0SetGuestTscAux(pVCpu, pMsr->u64Value); break;
6274 case MSR_K8_LSTAR: pMixedCtx->msrLSTAR = pMsr->u64Value; break;
6275 case MSR_K6_STAR: pMixedCtx->msrSTAR = pMsr->u64Value; break;
6276 case MSR_K8_SF_MASK: pMixedCtx->msrSFMASK = pMsr->u64Value; break;
6277 case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value; break;
6278 case MSR_K6_EFER: /* Nothing to do here since we intercept writes, see hmR0VmxLoadGuestMsrs(). */
6279 break;
6280
6281 default:
6282 {
6283 AssertMsgFailed(("Unexpected MSR in auto-load/store area. uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr, cMsrs));
6284 pVCpu->hm.s.u32HMError = pMsr->u32Msr;
6285 return VERR_HM_UNEXPECTED_LD_ST_MSR;
6286 }
6287 }
6288 }
6289
6290 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS);
6291 return VINF_SUCCESS;
6292}
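/*
 * A standalone illustrative sketch of the MSR auto-load/store area walked above.
 * Per the Intel SDM, each entry is 16 bytes: a 32-bit MSR index, a reserved dword,
 * and the 64-bit value stored on VM-exit / loaded on VM-entry.  The struct and
 * function names here are hypothetical, not VBox types.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct AUTOMSRENTRY
{
    uint32_t u32Msr;        /* MSR index. */
    uint32_t u32Reserved;   /* Must be zero. */
    uint64_t u64Value;      /* Value stored on VM-exit / loaded on VM-entry. */
} AUTOMSRENTRY;

static void DumpAutoMsrArea(const AUTOMSRENTRY *paEntries, uint32_t cEntries)
{
    for (uint32_t i = 0; i < cEntries; i++)
        printf("MSR %#x = %#llx\n", paEntries[i].u32Msr, (unsigned long long)paEntries[i].u64Value);
}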
6293
6294
6295/**
6296 * Saves the guest control registers from the current VMCS into the guest-CPU
6297 * context.
6298 *
6299 * @returns VBox status code.
6300 * @param pVCpu Pointer to the VMCPU.
6301 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
6302 * out-of-sync. Make sure to update the required fields
6303 * before using them.
6304 *
6305 * @remarks No-long-jump zone!!!
6306 */
6307static int hmR0VmxSaveGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6308{
6309 /* Guest CR0. Guest FPU. */
6310 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6311 AssertRCReturn(rc, rc);
6312
6313 /* Guest CR4. */
6314 rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
6315 AssertRCReturn(rc, rc);
6316
6317 /* Guest CR2 - updated always during the world-switch or in #PF. */
6318 /* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */
6319 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR3))
6320 {
6321 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
6322 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4));
6323
6324 PVM pVM = pVCpu->CTX_SUFF(pVM);
6325 if ( pVM->hm.s.vmx.fUnrestrictedGuest
6326 || ( pVM->hm.s.fNestedPaging
6327 && CPUMIsGuestPagingEnabledEx(pMixedCtx)))
6328 {
6329 uint64_t u64Val = 0;
6330 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);
6331 if (pMixedCtx->cr3 != u64Val)
6332 {
6333 CPUMSetGuestCR3(pVCpu, u64Val);
6334 if (VMMRZCallRing3IsEnabled(pVCpu))
6335 {
6336 PGMUpdateCR3(pVCpu, u64Val);
6337 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6338 }
6339 else
6340 {
6341 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3().*/
6342 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
6343 }
6344 }
6345
6346 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
6347 if (CPUMIsGuestInPAEModeEx(pMixedCtx)) /* Reads CR0, CR4 and EFER MSR (EFER is always up-to-date). */
6348 {
6349 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
6350 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
6351 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
6352 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
6353
6354 if (VMMRZCallRing3IsEnabled(pVCpu))
6355 {
6356 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6357 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6358 }
6359 else
6360 {
6361 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */
6362 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
6363 }
6364 }
6365 }
6366
6367 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR3);
6368 }
6369
6370 /*
6371 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback()
6372 * -> VMMRZCallRing3Disable() -> hmR0VmxSaveGuestState() -> Set VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
6373 * -> continue with VM-exit handling -> hmR0VmxSaveGuestControlRegs() and here we are.
6374 *
6375 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
6376 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
6377 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
6378 * -NOT- check if HMVMX_UPDATED_GUEST_CR3 is already set or not!
6379 *
6380 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
6381 */
6382 if (VMMRZCallRing3IsEnabled(pVCpu))
6383 {
6384 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
6385 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
6386
6387 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
6388 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6389
6390 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6391 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6392 }
6393
6394 return rc;
6395}
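/*
 * A standalone illustrative sketch of the "do it now or flag it for later" pattern the
 * CR3/PDPE handling above relies on: when it is safe to call into PGM (ring-3 calls
 * enabled) the update is applied immediately, otherwise a force flag records the
 * pending work and it is flushed later in a safe context.  All names below are
 * hypothetical stand-ins, not VBox APIs.
 */
#include <stdbool.h>
#include <stdint.h>

typedef struct DEFERREDCR3
{
    bool     fUpdatePending;   /* Plays the role of VMCPU_FF_HM_UPDATE_CR3. */
    uint64_t uNewCr3;
} DEFERREDCR3;

static void ApplyCr3(uint64_t uCr3) { (void)uCr3; /* Stand-in for the real update call. */ }

static void NoteCr3Change(DEFERREDCR3 *pState, uint64_t uCr3, bool fCanCallNow)
{
    pState->uNewCr3 = uCr3;
    if (fCanCallNow)
        ApplyCr3(uCr3);                 /* Safe context: update immediately. */
    else
        pState->fUpdatePending = true;  /* Unsafe context: defer, see FlushDeferredCr3(). */
}

static void FlushDeferredCr3(DEFERREDCR3 *pState)
{
    if (pState->fUpdatePending)
    {
        ApplyCr3(pState->uNewCr3);
        pState->fUpdatePending = false;
    }
}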
6396
6397
6398/**
6399 * Reads a guest segment register from the current VMCS into the guest-CPU
6400 * context.
6401 *
6402 * @returns VBox status code.
6403 * @param pVCpu Pointer to the VMCPU.
6404 * @param idxSel Index of the selector in the VMCS.
6405 * @param idxLimit Index of the segment limit in the VMCS.
6406 * @param idxBase Index of the segment base in the VMCS.
6407 * @param idxAccess Index of the access rights of the segment in the VMCS.
6408 * @param pSelReg Pointer to the segment selector.
6409 *
6410 * @remarks No-long-jump zone!!!
6411 * @remarks Never call this function directly!!! Use the VMXLOCAL_READ_SEG()
6412 * macro as that takes care of whether to read from the VMCS cache or
6413 * not.
6414 */
6415DECLINLINE(int) hmR0VmxReadSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
6416 PCPUMSELREG pSelReg)
6417{
6418 NOREF(pVCpu);
6419
6420 uint32_t u32Val = 0;
6421 int rc = VMXReadVmcs32(idxSel, &u32Val);
6422 AssertRCReturn(rc, rc);
6423 pSelReg->Sel = (uint16_t)u32Val;
6424 pSelReg->ValidSel = (uint16_t)u32Val;
6425 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6426
6427 rc = VMXReadVmcs32(idxLimit, &u32Val);
6428 AssertRCReturn(rc, rc);
6429 pSelReg->u32Limit = u32Val;
6430
6431 uint64_t u64Val = 0;
6432 rc = VMXReadVmcsGstNByIdxVal(idxBase, &u64Val);
6433 AssertRCReturn(rc, rc);
6434 pSelReg->u64Base = u64Val;
6435
6436 rc = VMXReadVmcs32(idxAccess, &u32Val);
6437 AssertRCReturn(rc, rc);
6438 pSelReg->Attr.u = u32Val;
6439
6440 /*
6441 * If VT-x marks the segment as unusable, most other bits remain undefined:
6442 * - For CS the L, D and G bits have meaning.
6443 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
6444 * - For the remaining data segments no bits are defined.
6445 *
6446 * The present bit and the unusable bit have been observed to be set at the
6447 * same time (the selector was supposed to be invalid as we started executing
6448 * a V8086 interrupt in ring-0).
6449 *
6450 * What should be important for the rest of the VBox code, is that the P bit is
6451 * cleared. Some of the other VBox code recognizes the unusable bit, but
6452 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
6453 * safe side here, we'll strip off P and other bits we don't care about. If
6454 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
6455 *
6456 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
6457 */
6458 if (pSelReg->Attr.u & X86DESCATTR_UNUSABLE)
6459 {
6460 Assert(idxSel != VMX_VMCS16_GUEST_FIELD_TR); /* TR is the only selector that can never be unusable. */
6461
6462 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
6463 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
6464 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
6465
6466 Log4(("hmR0VmxReadSegmentReg: Unusable idxSel=%#x attr=%#x -> %#x\n", idxSel, u32Val, pSelReg->Attr.u));
6467#ifdef DEBUG_bird
6468 AssertMsg((u32Val & ~X86DESCATTR_P) == pSelReg->Attr.u,
6469 ("%#x: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
6470 idxSel, u32Val, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
6471#endif
6472 }
6473 return VINF_SUCCESS;
6474}
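/*
 * A standalone illustrative sketch of the unusable-segment sanitizing done above:
 * keep only the attribute bits that still carry meaning (L/D/G, DPL, type,
 * descriptor-type and the unusable bit itself) so that, in particular, the present
 * bit reads as clear.  The macro names are hypothetical; the bit layout mirrors the
 * packed x86 hidden segment attribute / VMX access-rights format.
 */
#include <stdint.h>

#define SEGATTR_TYPE       0x000fu   /* Segment type. */
#define SEGATTR_DT         0x0010u   /* Descriptor type (code/data vs. system). */
#define SEGATTR_DPL        0x0060u   /* Descriptor privilege level. */
#define SEGATTR_P          0x0080u   /* Present. */
#define SEGATTR_L          0x2000u   /* 64-bit code segment. */
#define SEGATTR_D          0x4000u   /* Default operand size. */
#define SEGATTR_G          0x8000u   /* Granularity. */
#define SEGATTR_UNUSABLE   0x10000u  /* VT-x "unusable" bit. */

static uint32_t SanitizeUnusableSegAttrs(uint32_t uAttr)
{
    if (uAttr & SEGATTR_UNUSABLE)
        uAttr &= SEGATTR_UNUSABLE | SEGATTR_L | SEGATTR_D | SEGATTR_G
               | SEGATTR_DPL | SEGATTR_TYPE | SEGATTR_DT;
    return uAttr;   /* P (and other undefined bits) are stripped when unusable. */
}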
6475
6476
6477#ifdef VMX_USE_CACHED_VMCS_ACCESSES
6478# define VMXLOCAL_READ_SEG(Sel, CtxSel) \
6479 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
6480 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
6481#else
6482# define VMXLOCAL_READ_SEG(Sel, CtxSel) \
6483 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
6484 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
6485#endif
6486
6487
6488/**
6489 * Saves the guest segment registers from the current VMCS into the guest-CPU
6490 * context.
6491 *
6492 * @returns VBox status code.
6493 * @param pVCpu Pointer to the VMCPU.
6494 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
6495 * out-of-sync. Make sure to update the required fields
6496 * before using them.
6497 *
6498 * @remarks No-long-jump zone!!!
6499 */
6500static int hmR0VmxSaveGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6501{
6502 /* Guest segment registers. */
6503 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS))
6504 {
6505 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); AssertRCReturn(rc, rc);
6506 rc = VMXLOCAL_READ_SEG(CS, cs); AssertRCReturn(rc, rc);
6507 rc = VMXLOCAL_READ_SEG(SS, ss); AssertRCReturn(rc, rc);
6508 rc = VMXLOCAL_READ_SEG(DS, ds); AssertRCReturn(rc, rc);
6509 rc = VMXLOCAL_READ_SEG(ES, es); AssertRCReturn(rc, rc);
6510 rc = VMXLOCAL_READ_SEG(FS, fs); AssertRCReturn(rc, rc);
6511 rc = VMXLOCAL_READ_SEG(GS, gs); AssertRCReturn(rc, rc);
6512
6513 /* Restore segment attributes for real-on-v86 mode hack. */
6514 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6515 {
6516 pMixedCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u;
6517 pMixedCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u;
6518 pMixedCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u;
6519 pMixedCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u;
6520 pMixedCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u;
6521 pMixedCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u;
6522 }
6523 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS);
6524 }
6525
6526 return VINF_SUCCESS;
6527}
6528
6529
6530/**
6531 * Saves the guest descriptor table registers and task register from the current
6532 * VMCS into the guest-CPU context.
6533 *
6534 * @returns VBox status code.
6535 * @param pVCpu Pointer to the VMCPU.
6536 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
6537 * out-of-sync. Make sure to update the required fields
6538 * before using them.
6539 *
6540 * @remarks No-long-jump zone!!!
6541 */
6542static int hmR0VmxSaveGuestTableRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6543{
6544 int rc = VINF_SUCCESS;
6545
6546 /* Guest LDTR. */
6547 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR))
6548 {
6549 rc = VMXLOCAL_READ_SEG(LDTR, ldtr);
6550 AssertRCReturn(rc, rc);
6551 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR);
6552 }
6553
6554 /* Guest GDTR. */
6555 uint64_t u64Val = 0;
6556 uint32_t u32Val = 0;
6557 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR))
6558 {
6559 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val); AssertRCReturn(rc, rc);
6560 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
6561 pMixedCtx->gdtr.pGdt = u64Val;
6562 pMixedCtx->gdtr.cbGdt = u32Val;
6563 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR);
6564 }
6565
6566 /* Guest IDTR. */
6567 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR))
6568 {
6569 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val); AssertRCReturn(rc, rc);
6570 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
6571 pMixedCtx->idtr.pIdt = u64Val;
6572 pMixedCtx->idtr.cbIdt = u32Val;
6573 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR);
6574 }
6575
6576 /* Guest TR. */
6577 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_TR))
6578 {
6579 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6580 AssertRCReturn(rc, rc);
6581
6582 /* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR, don't save the fake one. */
6583 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6584 {
6585 rc = VMXLOCAL_READ_SEG(TR, tr);
6586 AssertRCReturn(rc, rc);
6587 }
6588 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_TR);
6589 }
6590 return rc;
6591}
6592
6593#undef VMXLOCAL_READ_SEG
6594
6595
6596/**
6597 * Saves the guest debug-register DR7 from the current VMCS into the guest-CPU
6598 * context.
6599 *
6600 * @returns VBox status code.
6601 * @param pVCpu Pointer to the VMCPU.
6602 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
6603 * out-of-sync. Make sure to update the required fields
6604 * before using them.
6605 *
6606 * @remarks No-long-jump zone!!!
6607 */
6608static int hmR0VmxSaveGuestDR7(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6609{
6610 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_DEBUG))
6611 {
6612 if (!pVCpu->hm.s.fUsingHyperDR7)
6613 {
6614 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
6615 uint32_t u32Val;
6616 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val); AssertRCReturn(rc, rc);
6617 pMixedCtx->dr[7] = u32Val;
6618 }
6619
6620 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_DEBUG);
6621 }
6622 return VINF_SUCCESS;
6623}
6624
6625
6626/**
6627 * Saves the guest APIC state from the current VMCS into the guest-CPU context.
6628 *
6629 * @returns VBox status code.
6630 * @param pVCpu Pointer to the VMCPU.
6631 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
6632 * out-of-sync. Make sure to update the required fields
6633 * before using them.
6634 *
6635 * @remarks No-long-jump zone!!!
6636 */
6637static int hmR0VmxSaveGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6638{
6639 NOREF(pMixedCtx);
6640
6641 /* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. */
6642 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_APIC_STATE);
6643 return VINF_SUCCESS;
6644}
6645
6646
6647/**
6648 * Saves the entire guest state from the currently active VMCS into the
6649 * guest-CPU context. This essentially VMREADs all guest-data.
6650 *
6651 * @returns VBox status code.
6652 * @param pVCpu Pointer to the VMCPU.
6653 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6654 * out-of-sync. Make sure to update the required fields
6655 * before using them.
6656 */
6657static int hmR0VmxSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6658{
6659 Assert(pVCpu);
6660 Assert(pMixedCtx);
6661
6662 if (HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL)
6663 return VINF_SUCCESS;
6664
6665 /* Though we can longjmp to ring-3 due to log-flushes here and get recalled
6666 again on the ring-3 callback path, there is no real need to. */
6667 if (VMMRZCallRing3IsEnabled(pVCpu))
6668 VMMR0LogFlushDisable(pVCpu);
6669 else
6670 Assert(VMMR0IsLogFlushDisabled(pVCpu));
6671 Log4Func(("vcpu[%RU32]\n", pVCpu->idCpu));
6672
6673 int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
6674 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestRipRspRflags failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6675
6676 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
6677 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6678
6679 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
6680 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6681
6682 rc = hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
6683 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestTableRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6684
6685 rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
6686 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDR7 failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6687
6688 rc = hmR0VmxSaveGuestSysenterMsrs(pVCpu, pMixedCtx);
6689 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6690
6691 rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
6692 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestLazyMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6693
6694 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
6695 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6696
6697 rc = hmR0VmxSaveGuestActivityState(pVCpu, pMixedCtx);
6698 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6699
6700 rc = hmR0VmxSaveGuestApicState(pVCpu, pMixedCtx);
6701 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestApicState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6702
6703 AssertMsg(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL,
6704 ("Missed guest state bits while saving state; residue %RX32\n", HMVMXCPU_GST_VALUE(pVCpu)));
6705
6706 if (VMMRZCallRing3IsEnabled(pVCpu))
6707 VMMR0LogFlushEnable(pVCpu);
6708
6709 return rc;
6710}
6711
6712
6713/**
6714 * Check per-VM and per-VCPU force flag actions that require us to go back to
6715 * ring-3 for one reason or another.
6716 *
6717 * @returns VBox status code (informational status codes included).
6718 * @retval VINF_SUCCESS if we don't have any actions that require going back to
6719 * ring-3.
6720 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
6721 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
6722 * interrupts)
6723 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
6724 * all EMTs to be in ring-3.
6725 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
6726 * @retval VINF_EM_NO_MEMORY if PGM is out of memory; we need to return
6727 *         to the EM loop.
6728 *
6729 * @param pVM Pointer to the VM.
6730 * @param pVCpu Pointer to the VMCPU.
6731 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6732 * out-of-sync. Make sure to update the required fields
6733 * before using them.
6734 */
6735static int hmR0VmxCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6736{
6737 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6738
6739 if ( VM_FF_IS_PENDING(pVM, !pVCpu->hm.s.fSingleInstruction
6740 ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
6741 || VMCPU_FF_IS_PENDING(pVCpu, !pVCpu->hm.s.fSingleInstruction
6742 ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
6743 {
6744 /* We need the control registers now, make sure the guest-CPU context is updated. */
6745 int rc3 = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
6746 AssertRCReturn(rc3, rc3);
6747
6748 /* Pending HM CR3 sync. */
6749 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
6750 {
6751 int rc2 = PGMUpdateCR3(pVCpu, pMixedCtx->cr3);
6752 AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
6753 ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
6754 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6755 }
6756
6757 /* Pending HM PAE PDPEs. */
6758 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
6759 {
6760 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6761 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6762 }
6763
6764         /* Pending PGM CR3 sync. */
6765 if (VMCPU_FF_IS_PENDING(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
6766 {
6767 int rc2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4,
6768 VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
6769 if (rc2 != VINF_SUCCESS)
6770 {
6771 AssertRC(rc2);
6772 Log4(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", rc2));
6773 return rc2;
6774 }
6775 }
6776
6777 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
6778 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
6779 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
6780 {
6781 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
6782 int rc2 = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
6783 Log4(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2));
6784 return rc2;
6785 }
6786
6787 /* Pending VM request packets, such as hardware interrupts. */
6788 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
6789 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
6790 {
6791 Log4(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
6792 return VINF_EM_PENDING_REQUEST;
6793 }
6794
6795 /* Pending PGM pool flushes. */
6796 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
6797 {
6798 Log4(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
6799 return VINF_PGM_POOL_FLUSH_PENDING;
6800 }
6801
6802 /* Pending DMA requests. */
6803 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
6804 {
6805 Log4(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
6806 return VINF_EM_RAW_TO_R3;
6807 }
6808 }
6809
6810 return VINF_SUCCESS;
6811}
6812
6813
6814/**
6815 * Converts any TRPM trap into a pending HM event. This is typically used when
6816 * entering from ring-3 (not longjmp returns).
6817 *
6818 * @param pVCpu Pointer to the VMCPU.
6819 */
6820static void hmR0VmxTrpmTrapToPendingEvent(PVMCPU pVCpu)
6821{
6822 Assert(TRPMHasTrap(pVCpu));
6823 Assert(!pVCpu->hm.s.Event.fPending);
6824
6825 uint8_t uVector;
6826 TRPMEVENT enmTrpmEvent;
6827 RTGCUINT uErrCode;
6828 RTGCUINTPTR GCPtrFaultAddress;
6829 uint8_t cbInstr;
6830
6831 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
6832 AssertRC(rc);
6833
6834 /* Refer Intel spec. 24.8.3 "VM-entry Controls for Event Injection" for the format of u32IntInfo. */
6835 uint32_t u32IntInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
6836 if (enmTrpmEvent == TRPM_TRAP)
6837 {
6838 switch (uVector)
6839 {
6840 case X86_XCPT_NMI:
6841 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6842 break;
6843
6844 case X86_XCPT_BP:
6845 case X86_XCPT_OF:
6846 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6847 break;
6848
6849 case X86_XCPT_PF:
6850 case X86_XCPT_DF:
6851 case X86_XCPT_TS:
6852 case X86_XCPT_NP:
6853 case X86_XCPT_SS:
6854 case X86_XCPT_GP:
6855 case X86_XCPT_AC:
6856 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6857 /* no break! */
6858 default:
6859 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6860 break;
6861 }
6862 }
6863 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
6864 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6865 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
6866 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6867 else
6868 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
6869
6870 rc = TRPMResetTrap(pVCpu);
6871 AssertRC(rc);
6872 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
6873 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
6874
6875 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
6876 STAM_COUNTER_DEC(&pVCpu->hm.s.StatInjectPendingReflect);
6877}
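/*
 * A standalone illustrative sketch of the VM-entry interruption-information packing
 * done above (Intel SDM 24.8.3 "VM-Entry Controls for Event Injection"): vector in
 * bits 7:0, event type in bits 10:8, "deliver error code" in bit 11, "valid" in
 * bit 31.  The field layout and type encodings are architectural; the macro and
 * helper names are hypothetical, not VBox APIs.
 */
#include <stdbool.h>
#include <stdint.h>

#define ENTRY_INT_TYPE_EXT_INT   0u   /* External interrupt. */
#define ENTRY_INT_TYPE_NMI       2u   /* Non-maskable interrupt. */
#define ENTRY_INT_TYPE_HW_XCPT   3u   /* Hardware exception (#PF, #GP, ...). */
#define ENTRY_INT_TYPE_SW_INT    4u   /* Software interrupt (INT n). */
#define ENTRY_INT_TYPE_SW_XCPT   6u   /* Software exception (#BP, #OF). */

static uint32_t PackEntryIntInfo(uint8_t uVector, uint32_t uType, bool fErrorCode)
{
    uint32_t uInfo = uVector;
    uInfo |= uType << 8;        /* Interruption type. */
    if (fErrorCode)
        uInfo |= 1u << 11;      /* Deliver error code on the guest stack. */
    uInfo |= 1u << 31;          /* Valid. */
    return uInfo;
}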
6878
6879
6880/**
6881 * Converts any pending HM event into a TRPM trap. Typically used when leaving
6882 * VT-x to execute any instruction.
6883 *
6884 * @param   pVCpu       Pointer to the VMCPU.
6885 */
6886static void hmR0VmxPendingEventToTrpmTrap(PVMCPU pVCpu)
6887{
6888 Assert(pVCpu->hm.s.Event.fPending);
6889
6890 uint32_t uVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
6891 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo);
6892 bool fErrorCodeValid = VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntInfo);
6893 uint32_t uErrorCode = pVCpu->hm.s.Event.u32ErrCode;
6894
6895 /* If a trap was already pending, we did something wrong! */
6896 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
6897
6898 TRPMEVENT enmTrapType;
6899 switch (uVectorType)
6900 {
6901 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
6902 enmTrapType = TRPM_HARDWARE_INT;
6903 break;
6904
6905 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
6906 enmTrapType = TRPM_SOFTWARE_INT;
6907 break;
6908
6909 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
6910 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
6911 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP and #OF */
6912 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
6913 enmTrapType = TRPM_TRAP;
6914 break;
6915
6916 default:
6917 AssertMsgFailed(("Invalid trap type %#x\n", uVectorType));
6918 enmTrapType = TRPM_32BIT_HACK;
6919 break;
6920 }
6921
6922 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
6923
6924 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
6925 AssertRC(rc);
6926
6927 if (fErrorCodeValid)
6928 TRPMSetErrorCode(pVCpu, uErrorCode);
6929
6930 if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6931 && uVector == X86_XCPT_PF)
6932 {
6933 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
6934 }
6935 else if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6936 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6937 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6938 {
6939 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6940 || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
6941 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
6942 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
6943 }
6944 pVCpu->hm.s.Event.fPending = false;
6945}
6946
6947
6948/**
6949 * Does the necessary state syncing before returning to ring-3 for any reason
6950 * (longjmp, preemption, voluntary exits to ring-3) from VT-x.
6951 *
6952 * @returns VBox status code.
6953 * @param pVM Pointer to the VM.
6954 * @param pVCpu Pointer to the VMCPU.
6955 * @param pMixedCtx Pointer to the guest-CPU context. The data may
6956 * be out-of-sync. Make sure to update the required
6957 * fields before using them.
6958 * @param fSaveGuestState Whether to save the guest state or not.
6959 *
6960 * @remarks No-long-jmp zone!!!
6961 */
6962static int hmR0VmxLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fSaveGuestState)
6963{
6964 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6965 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6966
6967 RTCPUID idCpu = RTMpCpuId();
6968 Log4Func(("HostCpuId=%u\n", idCpu));
6969
6970 /*
6971 * !!! IMPORTANT !!!
6972 * If you modify code here, make sure to check whether hmR0VmxCallRing3Callback() needs to be updated too.
6973 */
6974
6975 /* Save the guest state if necessary. */
6976 if ( fSaveGuestState
6977 && HMVMXCPU_GST_VALUE(pVCpu) != HMVMX_UPDATED_GUEST_ALL)
6978 {
6979 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
6980 AssertRCReturn(rc, rc);
6981 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
6982 }
6983
6984    /* Restore host FPU state if necessary and resync on next R0 reentry. */
6985 if (CPUMIsGuestFPUStateActive(pVCpu))
6986 {
6987 /* We shouldn't reload CR0 without saving it first. */
6988 if (!fSaveGuestState)
6989 {
6990 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6991 AssertRCReturn(rc, rc);
6992 }
6993 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
6994 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
6995 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
6996 }
6997
6998 /* Restore host debug registers if necessary and resync on next R0 reentry. */
6999#ifdef VBOX_STRICT
7000 if (CPUMIsHyperDebugStateActive(pVCpu))
7001 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
7002#endif
7003 if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */))
7004 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
7005 Assert(!CPUMIsGuestDebugStateActive(pVCpu) && !CPUMIsGuestDebugStateActivePending(pVCpu));
7006 Assert(!CPUMIsHyperDebugStateActive(pVCpu) && !CPUMIsHyperDebugStateActivePending(pVCpu));
7007
7008#if HC_ARCH_BITS == 64
7009 /* Restore host-state bits that VT-x only restores partially. */
7010 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
7011 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
7012 {
7013 Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags, idCpu));
7014 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
7015 }
7016 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
7017#endif
7018
7019#if HC_ARCH_BITS == 64
7020 /* Restore the lazy host MSRs as we're leaving VT-x context. */
7021 if ( pVM->hm.s.fAllow64BitGuests
7022 && pVCpu->hm.s.vmx.fLazyMsrs)
7023 {
7024 /* We shouldn't reload the guest MSRs without saving it first. */
7025 if (!fSaveGuestState)
7026 {
7027 int rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
7028 AssertRCReturn(rc, rc);
7029 }
7030 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS));
7031 hmR0VmxLazyRestoreHostMsrs(pVCpu);
7032 Assert(!pVCpu->hm.s.vmx.fLazyMsrs);
7033 }
7034#endif
7035
7036 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
7037 pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
7038
7039 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
7040 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
7041 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
7042 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
7043 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitIO);
7044 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitMovCRx);
7045 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitXcptNmi);
7046 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
7047
7048 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
7049
7050 /** @todo This partially defeats the purpose of having preemption hooks.
7051     *    The problem is that deregistering the hooks should happen at a point that
7052     *    lasts until the EMT is about to be destroyed, not every time we leave HM
7053     *    context.
7054 */
7055 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
7056 {
7057 int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
7058 AssertRCReturn(rc, rc);
7059
7060 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
7061 Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
7062 }
7063 Assert(!(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED));
7064 NOREF(idCpu);
7065
7066 return VINF_SUCCESS;
7067}
7068
7069
7070/**
7071 * Leaves the VT-x session.
7072 *
7073 * @returns VBox status code.
7074 * @param pVM Pointer to the VM.
7075 * @param pVCpu Pointer to the VMCPU.
7076 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7077 * out-of-sync. Make sure to update the required fields
7078 * before using them.
7079 *
7080 * @remarks No-long-jmp zone!!!
7081 */
7082DECLINLINE(int) hmR0VmxLeaveSession(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7083{
7084 HM_DISABLE_PREEMPT_IF_NEEDED();
7085 HMVMX_ASSERT_CPU_SAFE();
7086 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
7087 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7088
7089    /* When thread-context hooks are used, we can avoid doing the leave again if we were preempted earlier
7090       and already did this from the VMXR0ThreadCtxCallback(). */
7091 if (!pVCpu->hm.s.fLeaveDone)
7092 {
7093 int rc2 = hmR0VmxLeave(pVM, pVCpu, pMixedCtx, true /* fSaveGuestState */);
7094 AssertRCReturnStmt(rc2, HM_RESTORE_PREEMPT_IF_NEEDED(), rc2);
7095 pVCpu->hm.s.fLeaveDone = true;
7096 }
7097 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
7098
7099 /*
7100 * !!! IMPORTANT !!!
7101 * If you modify code here, make sure to check whether hmR0VmxCallRing3Callback() needs to be updated too.
7102 */
7103
7104 /* Deregister hook now that we've left HM context before re-enabling preemption. */
7105 /** @todo This is bad. Deregistering here means we need to VMCLEAR always
7106 * (longjmp/exit-to-r3) in VT-x which is not efficient. */
7107 if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
7108 VMMR0ThreadCtxHooksDeregister(pVCpu);
7109
7110 /* Leave HM context. This takes care of local init (term). */
7111 int rc = HMR0LeaveCpu(pVCpu);
7112
7113 HM_RESTORE_PREEMPT_IF_NEEDED();
7114
7115 return rc;
7116}
7117
7118
7119/**
7120 * Does the necessary state syncing before doing a longjmp to ring-3.
7121 *
7122 * @returns VBox status code.
7123 * @param pVM Pointer to the VM.
7124 * @param pVCpu Pointer to the VMCPU.
7125 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7126 * out-of-sync. Make sure to update the required fields
7127 * before using them.
7128 *
7129 * @remarks No-long-jmp zone!!!
7130 */
7131DECLINLINE(int) hmR0VmxLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7132{
7133 return hmR0VmxLeaveSession(pVM, pVCpu, pMixedCtx);
7134}
7135
7136
7137/**
7138 * Take necessary actions before going back to ring-3.
7139 *
7140 * An action requires us to go back to ring-3. This function does the necessary
7141 * steps before we can safely return to ring-3. This is not the same as longjmps
7142 * to ring-3; this is voluntary and prepares the guest so it may continue
7143 * executing outside HM (recompiler/IEM).
7144 *
7145 * @returns VBox status code.
7146 * @param pVM Pointer to the VM.
7147 * @param pVCpu Pointer to the VMCPU.
7148 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7149 * out-of-sync. Make sure to update the required fields
7150 * before using them.
7151 * @param rcExit The reason for exiting to ring-3. Can be
7152 * VINF_VMM_UNKNOWN_RING3_CALL.
7153 */
7154static int hmR0VmxExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)
7155{
7156 Assert(pVM);
7157 Assert(pVCpu);
7158 Assert(pMixedCtx);
7159 HMVMX_ASSERT_PREEMPT_SAFE();
7160
7161 if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
7162 {
7163 VMXGetActivatedVmcs(&pVCpu->hm.s.vmx.LastError.u64VMCSPhys);
7164 pVCpu->hm.s.vmx.LastError.u32VMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs;
7165 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
7166 /* LastError.idCurrentCpu was updated in hmR0VmxPreRunGuestCommitted(). */
7167 }
7168
7169    /* Please, no longjumps here (a log flush could jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
7170 VMMRZCallRing3Disable(pVCpu);
7171 Log4(("hmR0VmxExitToRing3: pVCpu=%p idCpu=%RU32 rcExit=%d\n", pVCpu, pVCpu->idCpu, rcExit));
7172
7173 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
7174 if (pVCpu->hm.s.Event.fPending)
7175 {
7176 hmR0VmxPendingEventToTrpmTrap(pVCpu);
7177 Assert(!pVCpu->hm.s.Event.fPending);
7178 }
7179
7180 /* If we're emulating an instruction, we shouldn't have any TRPM traps pending
7181 and if we're injecting an event we should have a TRPM trap pending. */
7182 Assert(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu));
7183 Assert(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu));
7184
7185 /* Save guest state and restore host state bits. */
7186 int rc = hmR0VmxLeaveSession(pVM, pVCpu, pMixedCtx);
7187 AssertRCReturn(rc, rc);
7188 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
7189 /* Thread-context hooks are unregistered at this point!!! */
7190
7191 /* Sync recompiler state. */
7192 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
7193 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
7194 | CPUM_CHANGED_LDTR
7195 | CPUM_CHANGED_GDTR
7196 | CPUM_CHANGED_IDTR
7197 | CPUM_CHANGED_TR
7198 | CPUM_CHANGED_HIDDEN_SEL_REGS);
7199 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
7200 if ( pVM->hm.s.fNestedPaging
7201 && CPUMIsGuestPagingEnabledEx(pMixedCtx))
7202 {
7203 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
7204 }
7205
7206 Assert(!pVCpu->hm.s.fClearTrapFlag);
7207
7208 /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
7209 if (rcExit != VINF_EM_RAW_INTERRUPT)
7210 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
7211
7212 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
7213
7214 /* We do -not- want any longjmp notifications after this! We must return to ring-3 ASAP. */
7215 VMMRZCallRing3RemoveNotification(pVCpu);
7216 VMMRZCallRing3Enable(pVCpu);
7217
7218 return rc;
7219}
7220
7221
7222/**
7223 * VMMRZCallRing3() callback wrapper which saves the guest state before we
7224 * longjump to ring-3 and possibly get preempted.
7225 *
7226 * @returns VBox status code.
7227 * @param pVCpu Pointer to the VMCPU.
7228 * @param enmOperation The operation causing the ring-3 longjump.
7229 * @param pvUser Opaque pointer to the guest-CPU context. The data
7230 * may be out-of-sync. Make sure to update the required
7231 * fields before using them.
7232 */
7233DECLCALLBACK(int) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
7234{
7235 if (enmOperation == VMMCALLRING3_VM_R0_ASSERTION)
7236 {
7237 /*
7238 * !!! IMPORTANT !!!
7239 * If you modify code here, make sure to check whether hmR0VmxLeave() and hmR0VmxLeaveSession() needs
7240 * to be updated too. This is a stripped down version which gets out ASAP trying to not trigger any assertion.
7241 */
7242 VMMRZCallRing3RemoveNotification(pVCpu);
7243 VMMRZCallRing3Disable(pVCpu);
7244 HM_DISABLE_PREEMPT_IF_NEEDED();
7245
7246 PVM pVM = pVCpu->CTX_SUFF(pVM);
7247 if (CPUMIsGuestFPUStateActive(pVCpu))
7248 CPUMR0SaveGuestFPU(pVM, pVCpu, (PCPUMCTX)pvUser);
7249
7250 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
7251
7252#if HC_ARCH_BITS == 64
7253 /* Restore host-state bits that VT-x only restores partially. */
7254 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
7255 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
7256 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
7257 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
7258
7259 /* Restore the lazy host MSRs as we're leaving VT-x context. */
7260 if ( pVM->hm.s.fAllow64BitGuests
7261 && pVCpu->hm.s.vmx.fLazyMsrs)
7262 {
7263 hmR0VmxLazyRestoreHostMsrs(pVCpu);
7264 }
7265#endif
7266 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
7267 pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
7268 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
7269 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
7270 {
7271 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
7272 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
7273 }
7274
7275 if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
7276 VMMR0ThreadCtxHooksDeregister(pVCpu);
7277
7278 HMR0LeaveCpu(pVCpu);
7279 HM_RESTORE_PREEMPT_IF_NEEDED();
7280 return VINF_SUCCESS;
7281 }
7282
7283 Assert(pVCpu);
7284 Assert(pvUser);
7285 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7286 HMVMX_ASSERT_PREEMPT_SAFE();
7287
7288 VMMRZCallRing3Disable(pVCpu);
7289 Assert(VMMR0IsLogFlushDisabled(pVCpu));
7290
7291 Log4(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3 pVCpu=%p idCpu=%RU32 enmOperation=%d\n", pVCpu, pVCpu->idCpu,
7292 enmOperation));
7293
7294 int rc = hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser);
7295 AssertRCReturn(rc, rc);
7296
7297 VMMRZCallRing3Enable(pVCpu);
7298 return VINF_SUCCESS;
7299}
7300
7301
7302/**
7303 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
7304 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
7305 *
7306 * @param pVCpu Pointer to the VMCPU.
7307 */
7308DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
7309{
7310 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
7311 {
7312 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
7313 {
7314 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
7315 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7316 AssertRC(rc);
7317 Log4(("Setup interrupt-window exiting\n"));
7318 }
7319 } /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. */
7320}
7321
7322
7323/**
7324 * Clears the interrupt-window exiting control in the VMCS.
7325 *
7326 * @param pVCpu Pointer to the VMCPU.
7327 */
7328DECLINLINE(void) hmR0VmxClearIntWindowExitVmcs(PVMCPU pVCpu)
7329{
7330 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
7331 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
7332 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7333 AssertRC(rc);
7334 Log4(("Cleared interrupt-window exiting\n"));
7335}
7336
7337
7338/**
7339 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
7340 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
7341 *
7342 * @param pVCpu Pointer to the VMCPU.
7343 */
7344DECLINLINE(void) hmR0VmxSetNmiWindowExitVmcs(PVMCPU pVCpu)
7345{
7346 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
7347 {
7348 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
7349 {
7350 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;
7351 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7352 AssertRC(rc);
7353 Log4(("Setup NMI-window exiting\n"));
7354 }
7355 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
7356}
7357
7358
7359/**
7360 * Clears the NMI-window exiting control in the VMCS.
7361 *
7362 * @param pVCpu Pointer to the VMCPU.
7363 */
7364DECLINLINE(void) hmR0VmxClearNmiWindowExitVmcs(PVMCPU pVCpu)
7365{
7366 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT);
7367 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;
7368 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7369 AssertRC(rc);
7370 Log4(("Cleared NMI-window exiting\n"));
7371}
7372
7373
7374/**
7375 * Evaluates the event to be delivered to the guest and sets it as the pending
7376 * event.
7377 *
7378 * @param pVCpu Pointer to the VMCPU.
7379 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7380 * out-of-sync. Make sure to update the required fields
7381 * before using them.
7382 */
7383static void hmR0VmxEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7384{
7385 Assert(!pVCpu->hm.s.Event.fPending);
7386
7387 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
7388 uint32_t const uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
7389 bool const fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7390 bool const fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7391 bool const fBlockNmi = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
7392
7393 Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
7394 Assert(!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
7395 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
7396 Assert(!TRPMHasTrap(pVCpu));
7397
7398 /*
7399 * Toggling of interrupt force-flags here is safe since we update TRPM on premature exits
7400 * to ring-3 before executing guest code, see hmR0VmxExitToRing3(). We must NOT restore these force-flags.
7401 */
7402 /** @todo SMI. SMIs take priority over NMIs. */
7403 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
7404 {
7405 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
7406 if ( !fBlockNmi
7407 && !fBlockSti
7408 && !fBlockMovSS)
7409 {
7410 Log4(("Pending NMI vcpu[%RU32]\n", pVCpu->idCpu));
7411 uint32_t u32IntInfo = X86_XCPT_NMI | VMX_EXIT_INTERRUPTION_INFO_VALID;
7412 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7413
7414 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7415 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
7416 }
7417 else
7418 hmR0VmxSetNmiWindowExitVmcs(pVCpu);
7419 }
7420 /*
7421 * Check if the guest can receive external interrupts (PIC/APIC). Once we do PDMGetInterrupt() we -must- deliver
7422 * the interrupt ASAP. We must not execute any guest code until we inject the interrupt.
7423 */
7424 else if ( VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
7425 && !pVCpu->hm.s.fSingleInstruction)
7426 {
7427 int rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7428 AssertRC(rc);
7429 bool const fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
7430 if ( !fBlockInt
7431 && !fBlockSti
7432 && !fBlockMovSS)
7433 {
7434 uint8_t u8Interrupt;
7435 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
7436 if (RT_SUCCESS(rc))
7437 {
7438 Log4(("Pending interrupt vcpu[%RU32] u8Interrupt=%#x \n", pVCpu->idCpu, u8Interrupt));
7439 uint32_t u32IntInfo = u8Interrupt | VMX_EXIT_INTERRUPTION_INFO_VALID;
7440 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7441
7442 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7443 }
7444 else
7445 {
7446 /** @todo Does this actually happen? If not turn it into an assertion. */
7447 Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
7448 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
7449 }
7450 }
7451 else
7452 hmR0VmxSetIntWindowExitVmcs(pVCpu);
7453 }
7454}
7455
7456
7457/**
7458 * Sets a pending-debug exception to be delivered to the guest if the guest is
7459 * single-stepping.
7460 *
7461 * @param pVCpu Pointer to the VMCPU.
7462 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7463 * out-of-sync. Make sure to update the required fields
7464 * before using them.
7465 */
7466DECLINLINE(void) hmR0VmxSetPendingDebugXcpt(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7467{
7468 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
7469 if (pMixedCtx->eflags.Bits.u1TF) /* We don't have any IA32_DEBUGCTL MSR for guests. Treat as all bits 0. */
7470 {
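        /* Guest is single-stepping (EFLAGS.TF): mark a single-step (BS) debug exception as pending so VT-x delivers a #DB to the guest on VM-entry. */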
7471 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS);
7472 AssertRC(rc);
7473 }
7474}
7475
7476
7477/**
7478 * Injects any pending events into the guest if the guest is in a state to
7479 * receive them.
7480 *
7481 * @returns VBox status code (informational status codes included).
7482 * @param pVCpu Pointer to the VMCPU.
7483 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7484 * out-of-sync. Make sure to update the required fields
7485 * before using them.
7486 * @param fStepping Running in hmR0VmxRunGuestCodeStep() and we should
7487 * return VINF_EM_DBG_STEPPED if the event was
7488 * dispatched directly.
7489 */
7490static int hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping)
7491{
7492 HMVMX_ASSERT_PREEMPT_SAFE();
7493 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7494
7495 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
7496 uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
7497 bool fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7498 bool fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7499
7500 Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
7501 Assert(!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
7502 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
7503 Assert(!TRPMHasTrap(pVCpu));
7504
7505 int rc = VINF_SUCCESS;
7506 if (pVCpu->hm.s.Event.fPending)
7507 {
7508 /*
7509 * Clear any interrupt-window exiting control if we're going to inject an interrupt. Saves one extra
7510 * VM-exit in situations where we previously setup interrupt-window exiting but got other VM-exits and
7511 * ended up enabling interrupts outside VT-x.
7512 */
7513 uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
7514 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT)
7515 && uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
7516 {
7517 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
7518 hmR0VmxClearIntWindowExitVmcs(pVCpu);
7519 }
7520
7521#ifdef VBOX_STRICT
7522 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
7523 {
7524 bool const fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
7525 Assert(!fBlockInt);
7526 Assert(!fBlockSti);
7527 Assert(!fBlockMovSS);
7528 }
7529 else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
7530 {
7531 bool fBlockNmi = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
7532 Assert(!fBlockSti);
7533 Assert(!fBlockMovSS);
7534 Assert(!fBlockNmi);
7535 }
7536#endif
7537 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#x\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
7538 (uint8_t)uIntType));
7539 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,
7540 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, fStepping, &uIntrState);
7541 AssertRCReturn(rc, rc);
7542
7543 /* Update the interruptibility-state as it could have been changed by
7544 hmR0VmxInjectEventVmcs() (e.g. real-on-v86 guest injecting software interrupts) */
7545 fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7546 fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7547
7548#ifdef VBOX_WITH_STATISTICS
7549 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
7550 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
7551 else
7552 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
7553#endif
7554 }
7555
7556 /* Deliver pending debug exception if the guest is single-stepping. Evaluate and set the BS bit. */
7557 if ( fBlockSti
7558 || fBlockMovSS)
7559 {
7560 if ( !pVCpu->hm.s.fSingleInstruction
7561 && !DBGFIsStepping(pVCpu))
7562 {
7563 /*
7564 * The pending-debug exceptions field is cleared on all VM-exits except VMX_EXIT_TPR_BELOW_THRESHOLD,
7565 * VMX_EXIT_MTF, VMX_EXIT_APIC_WRITE and VMX_EXIT_VIRTUALIZED_EOI.
7566 * See Intel spec. 27.3.4 "Saving Non-Register State".
7567 */
7568 int rc2 = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7569 AssertRCReturn(rc2, rc2);
7570 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
7571 }
7572 else if (pMixedCtx->eflags.Bits.u1TF)
7573 {
7574 /*
7575 * We are single-stepping in the hypervisor debugger using EFLAGS.TF. Clear interrupt inhibition as setting the
7576 * BS bit would mean delivering a #DB to the guest upon VM-entry when it shouldn't be.
7577 */
7578 Assert(!(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG));
7579 uIntrState = 0;
7580 }
7581 }
7582
7583 /*
7584 * There's no need to clear the VM-entry interruption-information field here if we're not injecting anything.
7585 * VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7586 */
7587 int rc2 = hmR0VmxLoadGuestIntrState(pVCpu, uIntrState);
7588 AssertRC(rc2);
7589
7590 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET || (rc == VINF_EM_DBG_STEPPED && fStepping));
7591 NOREF(fBlockMovSS); NOREF(fBlockSti);
7592 return rc;
7593}
7594
7595
7596/**
7597 * Sets an invalid-opcode (#UD) exception as pending-for-injection into the VM.
7598 *
7599 * @param pVCpu Pointer to the VMCPU.
7600 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7601 * out-of-sync. Make sure to update the required fields
7602 * before using them.
7603 */
7604DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7605{
7606 NOREF(pMixedCtx);
7607 uint32_t u32IntInfo = X86_XCPT_UD | VMX_EXIT_INTERRUPTION_INFO_VALID;
7608 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7609}
7610
7611
7612/**
7613 * Injects a double-fault (#DF) exception into the VM.
7614 *
7615 * @returns VBox status code (informational status code included).
7616 * @param pVCpu Pointer to the VMCPU.
7617 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7618 * out-of-sync. Make sure to update the required fields
7619 * before using them.
7620 * @param fStepping Whether we're running in hmR0VmxRunGuestCodeStep()
7621 * and should return VINF_EM_DBG_STEPPED if the event
7622 * is injected directly (register modified by us, not
7623 * by hardware on VM-entry).
7624 * @param puIntrState Pointer to the current guest interruptibility-state.
7625 * This interruptibility-state will be updated if
7626 * necessary. This cannot be NULL.
7627 */
7628DECLINLINE(int) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping, uint32_t *puIntrState)
7629{
7630 uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
7631 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7632 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7633 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */,
7634 fStepping, puIntrState);
7635}
7636
7637
7638/**
7639 * Sets a debug (#DB) exception as pending-for-injection into the VM.
7640 *
7641 * @param pVCpu Pointer to the VMCPU.
7642 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7643 * out-of-sync. Make sure to update the required fields
7644 * before using them.
7645 */
7646DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7647{
7648 NOREF(pMixedCtx);
7649 uint32_t u32IntInfo = X86_XCPT_DB | VMX_EXIT_INTERRUPTION_INFO_VALID;
7650 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7651 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7652}
7653
7654
7655/**
7656 * Sets an overflow (#OF) exception as pending-for-injection into the VM.
7657 *
7658 * @param pVCpu Pointer to the VMCPU.
7659 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7660 * out-of-sync. Make sure to update the required fields
7661 * before using them.
7662 * @param cbInstr The instruction length in bytes; used to compute the
7663 * return RIP that is pushed on the guest stack.
7664 */
7665DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
7666{
7667 NOREF(pMixedCtx);
7668 uint32_t u32IntInfo = X86_XCPT_OF | VMX_EXIT_INTERRUPTION_INFO_VALID;
7669 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7670 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7671}
7672
7673
7674/**
7675 * Injects a general-protection (#GP) fault into the VM.
7676 *
7677 * @returns VBox status code (informational status code included).
7678 * @param pVCpu Pointer to the VMCPU.
7679 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7680 * out-of-sync. Make sure to update the required fields
7681 * before using them.
7682 * @param fErrorCodeValid Whether the error code is valid (depends on the CPU
7683 * mode, i.e. in real-mode it's not valid).
7684 * @param u32ErrorCode The error code associated with the #GP.
7685 * @param fStepping Whether we're running in
7686 * hmR0VmxRunGuestCodeStep() and should return
7687 * VINF_EM_DBG_STEPPED if the event is injected
7688 * directly (register modified by us, not by
7689 * hardware on VM-entry).
7690 * @param puIntrState Pointer to the current guest interruptibility-state.
7691 * This interruptibility-state will be updated if
7692 * necessary. This cannot be NULL.
7693 */
7694DECLINLINE(int) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
7695 bool fStepping, uint32_t *puIntrState)
7696{
7697 uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
7698 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7699 if (fErrorCodeValid)
7700 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7701 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */,
7702 fStepping, puIntrState);
7703}
7704
7705
7706/**
7707 * Sets a general-protection (#GP) exception as pending-for-injection into the
7708 * VM.
7709 *
7710 * @param pVCpu Pointer to the VMCPU.
7711 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7712 * out-of-sync. Make sure to update the required fields
7713 * before using them.
7714 * @param u32ErrorCode The error code associated with the #GP.
7715 */
7716DECLINLINE(void) hmR0VmxSetPendingXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t u32ErrorCode)
7717{
7718 NOREF(pMixedCtx);
7719 uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
7720 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7721 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7722 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */);
7723}
7724
7725
7726/**
7727 * Sets a software interrupt (INTn) as pending-for-injection into the VM.
7728 *
7729 * @param pVCpu Pointer to the VMCPU.
7730 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7731 * out-of-sync. Make sure to update the required fields
7732 * before using them.
7733 * @param uVector The software interrupt vector number.
7734 * @param cbInstr The instruction length in bytes; used to compute the
7735 * return RIP that is pushed on the guest stack.
7736 */
7737DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr)
7738{
7739 NOREF(pMixedCtx);
7740 uint32_t u32IntInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
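    /* INT3 (#BP) and INTO (#OF) must be injected as software exceptions; every other INT n is injected as a software interrupt. */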
7741 if ( uVector == X86_XCPT_BP
7742 || uVector == X86_XCPT_OF)
7743 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7744 else
7745 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7746 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7747}
7748
7749
7750/**
7751 * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
7752 * stack.
7753 *
7754 * @returns VBox status code (informational status code included).
7755 * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
7756 * @param pVM Pointer to the VM.
7757 * @param pMixedCtx Pointer to the guest-CPU context.
7758 * @param uValue The value to push to the guest stack.
7759 */
7760DECLINLINE(int) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
7761{
7762 /*
7763 * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
7764 * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
7765 * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
7766 */
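    /* With SP == 1 a 16-bit push would wrap around the stack segment; treat it as unrecoverable and signal a guest reset (triple fault). */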
7767 if (pMixedCtx->sp == 1)
7768 return VINF_EM_RESET;
7769 pMixedCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */
7770 int rc = PGMPhysSimpleWriteGCPhys(pVM, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));
7771 AssertRCReturn(rc, rc);
7772 return rc;
7773}
7774
7775
7776/**
7777 * Injects an event into the guest upon VM-entry by updating the relevant fields
7778 * in the VM-entry area in the VMCS.
7779 *
7780 * @returns VBox status code (informational status codes included).
7781 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
7782 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
7783 *
7784 * @param pVCpu Pointer to the VMCPU.
7785 * @param pMixedCtx Pointer to the guest-CPU context. The data may
7786 * be out-of-sync. Make sure to update the required
7787 * fields before using them.
7788 * @param u64IntInfo The VM-entry interruption-information field.
7789 * @param cbInstr The VM-entry instruction length in bytes (for
7790 * software interrupts, exceptions and privileged
7791 * software exceptions).
7792 * @param u32ErrCode The VM-entry exception error code.
7793 * @param GCPtrFaultAddress The page-fault address for #PF exceptions.
7794 * @param puIntrState Pointer to the current guest interruptibility-state.
7795 * This interruptibility-state will be updated if
7796 * necessary. This cannot be NULL.
7797 * @param fStepping Whether we're running in
7798 * hmR0VmxRunGuestCodeStep() and should return
7799 * VINF_EM_DBG_STEPPED if the event is injected
7800 * directly (register modified by us, not by
7801 * hardware on VM-entry).
7802 *
7803 * @remarks Requires CR0!
7804 * @remarks No-long-jump zone!!!
7805 */
7806static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
7807 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, bool fStepping, uint32_t *puIntrState)
7808{
7809 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
7810 AssertMsg(u64IntInfo >> 32 == 0, ("%#RX64\n", u64IntInfo));
7811 Assert(puIntrState);
7812 uint32_t u32IntInfo = (uint32_t)u64IntInfo;
7813
7814 uint32_t const uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntInfo);
7815 uint32_t const uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo);
7816
7817#ifdef VBOX_STRICT
7818 /* Validate the error-code-valid bit for hardware exceptions. */
7819 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT)
7820 {
7821 switch (uVector)
7822 {
7823 case X86_XCPT_PF:
7824 case X86_XCPT_DF:
7825 case X86_XCPT_TS:
7826 case X86_XCPT_NP:
7827 case X86_XCPT_SS:
7828 case X86_XCPT_GP:
7829 case X86_XCPT_AC:
7830 AssertMsg(VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntInfo),
7831 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
7832 /* fallthru */
7833 default:
7834 break;
7835 }
7836 }
7837#endif
7838
7839 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
7840 Assert( uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
7841 || !(*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));
7842
7843 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
7844
7845 /* We require CR0 to check if the guest is in real-mode. */
7846 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7847 AssertRCReturn(rc, rc);
7848
7849 /*
7850 * Hardware interrupts & exceptions cannot be delivered through the software interrupt redirection bitmap to the real
7851 * mode task in virtual-8086 mode. We must jump to the interrupt handler in the (real-mode) guest.
7852 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode" for interrupt & exception classes.
7853 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
7854 */
7855 if (CPUMIsGuestInRealModeEx(pMixedCtx))
7856 {
7857 PVM pVM = pVCpu->CTX_SUFF(pVM);
7858 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
7859 {
7860 Assert(PDMVmmDevHeapIsEnabled(pVM));
7861 Assert(pVM->hm.s.vmx.pRealModeTSS);
7862
7863 /* We require RIP, RSP, RFLAGS, CS, IDTR. Save the required ones from the VMCS. */
7864 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7865 rc |= hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
7866 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
7867 AssertRCReturn(rc, rc);
7868 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP));
7869
7870 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
7871 size_t const cbIdtEntry = sizeof(X86IDTR16);
7872 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pMixedCtx->idtr.cbIdt)
7873 {
7874 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
7875 if (uVector == X86_XCPT_DF)
7876 return VINF_EM_RESET;
7877
7878 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
7879 if (uVector == X86_XCPT_GP)
7880 return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, fStepping, puIntrState);
7881
7882 /* If we're injecting an interrupt/exception with no valid IDT entry, inject a general-protection fault. */
7883 /* No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling" */
7884 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */,
7885 fStepping, puIntrState);
7886 }
7887
7888 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
7889 uint16_t uGuestIp = pMixedCtx->ip;
7890 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT)
7891 {
7892 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
7893 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
7894 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
7895 }
7896 else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT)
7897 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
7898
7899 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
7900 X86IDTR16 IdtEntry;
7901 RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pMixedCtx->idtr.pIdt + uVector * cbIdtEntry;
7902 rc = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
7903 AssertRCReturn(rc, rc);
7904
7905 /* Construct the stack frame for the interrupt/exception handler. */
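            /* Real-mode interrupt entry pushes FLAGS, then CS, then IP (16 bits each). */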
7906 rc = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
7907 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
7908 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, uGuestIp);
7909 AssertRCReturn(rc, rc);
7910
7911 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
7912 if (rc == VINF_SUCCESS)
7913 {
7914 pMixedCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
7915 pMixedCtx->rip = IdtEntry.offSel;
7916 pMixedCtx->cs.Sel = IdtEntry.uSel;
7917 pMixedCtx->cs.ValidSel = IdtEntry.uSel;
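                /* Real-mode segment base = selector * 16; cbIdtEntry is 4, so shifting by it is the same as << 4. */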
7918 pMixedCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
7919 if ( uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
7920 && uVector == X86_XCPT_PF)
7921 pMixedCtx->cr2 = GCPtrFaultAddress;
7922
7923 /* If any other guest-state bits are changed here, make sure to update
7924 hmR0VmxPreRunGuestCommitted() when thread-context hooks are used. */
7925 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS
7926 | HM_CHANGED_GUEST_RIP
7927 | HM_CHANGED_GUEST_RFLAGS
7928 | HM_CHANGED_GUEST_RSP);
7929
7930 /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */
7931 if (*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
7932 {
7933 Assert( uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
7934 && uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
7935 Log4(("Clearing inhibition due to STI.\n"));
7936 *puIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
7937 }
7938 Log4(("Injecting real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
7939 u32IntInfo, u32ErrCode, cbInstr, pMixedCtx->eflags.u, pMixedCtx->cs.Sel, pMixedCtx->eip));
7940
7941 /* The event has been truly dispatched. Mark it as no longer pending so we don't attempt to 'undo'
7942 it if we are returning to ring-3 before executing guest code. */
7943 pVCpu->hm.s.Event.fPending = false;
7944
7945 /* Make hmR0VmxPreRunGuest return if we're stepping since we've changed cs:rip. */
7946 if (fStepping)
7947 rc = VINF_EM_DBG_STEPPED;
7948 }
7949 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET || (rc == VINF_EM_DBG_STEPPED && fStepping));
7950 return rc;
7951 }
7952
7953 /*
7954 * When unrestricted guest execution is enabled and the guest is in real mode, we must not set the deliver-error-code bit.
7955 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
7956 */
7957 u32IntInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7958 }
7959
7960 /* Validate. */
7961 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
7962 Assert(!VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK_IRET(u32IntInfo)); /* Bit 12 MBZ. */
7963 Assert(!(u32IntInfo & 0x7ffff000)); /* Bits 30:12 MBZ. */
7964
7965 /* Inject. */
7966 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
7967 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntInfo))
7968 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
7969 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
7970
7971 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
7972 && uVector == X86_XCPT_PF)
7973 pMixedCtx->cr2 = GCPtrFaultAddress;
7974
7975 Log4(("Injecting vcpu[%RU32] u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x pMixedCtx->uCR2=%#RX64\n", pVCpu->idCpu,
7976 u32IntInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));
7977
7978 AssertRCReturn(rc, rc);
7979 return rc;
7980}
7981
7982
7983/**
7984 * Clears the interrupt-window and NMI-window exiting controls in the VMCS
7985 * and, if necessary, clears the current event in the VMCS as well.
7986 *
7987 * @returns VBox status code.
7988 * @param pVCpu Pointer to the VMCPU.
7989 *
7990 * @remarks Use this function only to clear events that have not yet been
7991 * delivered to the guest but have already been injected into the VMCS!
7992 * @remarks No-long-jump zone!!!
7993 */
7994static void hmR0VmxClearEventVmcs(PVMCPU pVCpu)
7995{
7996 int rc;
7997 Log4Func(("vcpu[%d]\n", pVCpu->idCpu));
7998
7999 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT)
8000 {
8001 hmR0VmxClearIntWindowExitVmcs(pVCpu);
8002 Assert(!pVCpu->hm.s.Event.fPending);
8003 }
8004
8005 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT)
8006 {
8007 hmR0VmxClearNmiWindowExitVmcs(pVCpu);
8008 Assert(!pVCpu->hm.s.Event.fPending);
8009 }
8010
8011 if (!pVCpu->hm.s.Event.fPending)
8012 return;
8013
8014#ifdef VBOX_STRICT
8015 uint32_t u32EntryInfo;
8016 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
8017 AssertRC(rc);
8018 Assert(VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo));
8019#endif
8020
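    /* Writing 0 clears the 'valid' bit of the VM-entry interruption-information field, so nothing gets injected on the next VM-entry. */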
8021 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0);
8022 AssertRC(rc);
8023
8024 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
8025 AssertRC(rc);
8026
8027 /* We deliberately don't clear "hm.s.Event.fPending" here, it's taken
8028 care of in hmR0VmxExitToRing3() converting the pending event to TRPM. */
8029}
8030
8031
8032/**
8033 * Enters the VT-x session.
8034 *
8035 * @returns VBox status code.
8036 * @param pVM Pointer to the VM.
8037 * @param pVCpu Pointer to the VMCPU.
8038 * @param pCpu Pointer to the CPU info struct.
8039 */
8040VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
8041{
8042 AssertPtr(pVM);
8043 AssertPtr(pVCpu);
8044 Assert(pVM->hm.s.vmx.fSupported);
8045 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8046 NOREF(pCpu); NOREF(pVM);
8047
8048 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
8049 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
8050
8051#ifdef VBOX_STRICT
8052 /* Make sure we're in VMX root mode. */
8053 RTCCUINTREG u32HostCR4 = ASMGetCR4();
8054 if (!(u32HostCR4 & X86_CR4_VMXE))
8055 {
8056 LogRel(("VMXR0Enter: X86_CR4_VMXE bit in CR4 is not set!\n"));
8057 return VERR_VMX_X86_CR4_VMXE_CLEARED;
8058 }
8059#endif
8060
8061 /*
8062 * Load the VCPU's VMCS as the current (and active) one.
8063 */
8064 Assert(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR);
8065 int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
8066 if (RT_FAILURE(rc))
8067 return rc;
8068
8069 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
8070 pVCpu->hm.s.fLeaveDone = false;
8071 Log4Func(("Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
8072
8073 return VINF_SUCCESS;
8074}
8075
8076
8077/**
8078 * The thread-context callback (only on platforms which support it).
8079 *
8080 * @param enmEvent The thread-context event.
8081 * @param pVCpu Pointer to the VMCPU.
8082 * @param fGlobalInit Whether global VT-x/AMD-V init. was used.
8083 * @thread EMT(pVCpu)
8084 */
8085VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
8086{
8087 NOREF(fGlobalInit);
8088
8089 switch (enmEvent)
8090 {
8091 case RTTHREADCTXEVENT_PREEMPTING:
8092 {
8093 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8094 Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
8095 VMCPU_ASSERT_EMT(pVCpu);
8096
8097 PVM pVM = pVCpu->CTX_SUFF(pVM);
8098 PCPUMCTX pMixedCtx = CPUMQueryGuestCtxPtr(pVCpu);
8099
8100 /* No longjmps (logger flushes, locks) in this fragile context. */
8101 VMMRZCallRing3Disable(pVCpu);
8102 Log4Func(("Preempting: HostCpuId=%u\n", RTMpCpuId()));
8103
8104 /*
8105 * Restore host-state (FPU, debug etc.)
8106 */
8107 if (!pVCpu->hm.s.fLeaveDone)
8108 {
8109 /* Do -not- save guest-state here as we might already be in the middle of saving it (esp. bad if we are
8110 holding the PGM lock while saving the guest state, see hmR0VmxSaveGuestControlRegs()). */
8111 hmR0VmxLeave(pVM, pVCpu, pMixedCtx, false /* fSaveGuestState */);
8112 pVCpu->hm.s.fLeaveDone = true;
8113 }
8114
8115 /* Leave HM context, takes care of local init (term). */
8116 int rc = HMR0LeaveCpu(pVCpu);
8117 AssertRC(rc); NOREF(rc);
8118
8119 /* Restore longjmp state. */
8120 VMMRZCallRing3Enable(pVCpu);
8121 STAM_COUNTER_INC(&pVCpu->hm.s.StatPreemptPreempting);
8122 break;
8123 }
8124
8125 case RTTHREADCTXEVENT_RESUMED:
8126 {
8127 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8128 Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
8129 VMCPU_ASSERT_EMT(pVCpu);
8130
8131 /* No longjmps here, as we don't want to trigger preemption (& its hook) while resuming. */
8132 VMMRZCallRing3Disable(pVCpu);
8133 Log4Func(("Resumed: HostCpuId=%u\n", RTMpCpuId()));
8134
8135 /* Initialize the bare minimum state required for HM. This takes care of
8136 initializing VT-x if necessary (onlined CPUs, local init etc.) */
8137 int rc = HMR0EnterCpu(pVCpu);
8138 AssertRC(rc);
8139 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
8140
8141 /* Load the active VMCS as the current one. */
8142 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR)
8143 {
8144 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
8145 AssertRC(rc); NOREF(rc);
8146 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
8147 Log4Func(("Resumed: Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
8148 }
8149 pVCpu->hm.s.fLeaveDone = false;
8150
8151 /* Restore longjmp state. */
8152 VMMRZCallRing3Enable(pVCpu);
8153 break;
8154 }
8155
8156 default:
8157 break;
8158 }
8159}
8160
8161
8162/**
8163 * Saves the host state in the VMCS host-state.
8164 * Sets up the VM-exit MSR-load area.
8165 *
8166 * The CPU state will be loaded from these fields on every successful VM-exit.
8167 *
8168 * @returns VBox status code.
8169 * @param pVM Pointer to the VM.
8170 * @param pVCpu Pointer to the VMCPU.
8171 *
8172 * @remarks No-long-jump zone!!!
8173 */
8174static int hmR0VmxSaveHostState(PVM pVM, PVMCPU pVCpu)
8175{
8176 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8177
8178 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
8179 return VINF_SUCCESS;
8180
8181 int rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
8182 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8183
8184 rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
8185 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8186
8187 rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
8188 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8189
8190 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
8191 return rc;
8192}
8193
8194
8195/**
8196 * Saves the host state in the VMCS host-state.
8197 *
8198 * @returns VBox status code.
8199 * @param pVM Pointer to the VM.
8200 * @param pVCpu Pointer to the VMCPU.
8201 *
8202 * @remarks No-long-jump zone!!!
8203 */
8204VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
8205{
8206 AssertPtr(pVM);
8207 AssertPtr(pVCpu);
8208
8209 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
8210
8211 /* Save the host state here while entering HM context. When thread-context hooks are used, we might get preempted
8212 and have to resave the host state but most of the time we won't be, so do it here before we disable interrupts. */
8213 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8214 return hmR0VmxSaveHostState(pVM, pVCpu);
8215}
8216
8217
8218/**
8219 * Loads the guest state into the VMCS guest-state area. The CPU state will be
8220 * loaded from these fields on every successful VM-entry.
8221 *
8222 * Sets up the VM-entry MSR-load and VM-exit MSR-store areas.
8223 * Sets up the VM-entry controls.
8224 * Sets up the appropriate VMX non-root function to execute guest code based on
8225 * the guest CPU mode.
8226 *
8227 * @returns VBox status code.
8228 * @param pVM Pointer to the VM.
8229 * @param pVCpu Pointer to the VMCPU.
8230 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8231 * out-of-sync. Make sure to update the required fields
8232 * before using them.
8233 *
8234 * @remarks No-long-jump zone!!!
8235 */
8236static int hmR0VmxLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
8237{
8238 AssertPtr(pVM);
8239 AssertPtr(pVCpu);
8240 AssertPtr(pMixedCtx);
8241 HMVMX_ASSERT_PREEMPT_SAFE();
8242
8243 VMMRZCallRing3Disable(pVCpu);
8244 Assert(VMMR0IsLogFlushDisabled(pVCpu));
8245
8246 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
8247
8248 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
8249
8250 /* Determine real-on-v86 mode. */
8251 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
8252 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
8253 && CPUMIsGuestInRealModeEx(pMixedCtx))
8254 {
8255 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
8256 }
8257
8258 /*
8259 * Load the guest-state into the VMCS.
8260 * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
8261 * Ideally, assert that the cross-dependent bits are up-to-date at the point of using it.
8262 */
8263 int rc = hmR0VmxSetupVMRunHandler(pVCpu, pMixedCtx);
8264 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8265
8266 /* This needs to be done after hmR0VmxSetupVMRunHandler() as changing pfnStartVM may require VM-entry control updates. */
8267 rc = hmR0VmxLoadGuestEntryCtls(pVCpu, pMixedCtx);
8268 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestEntryCtls! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8269
8270 /* This needs to be done after hmR0VmxSetupVMRunHandler() as changing pfnStartVM may require VM-exit control updates. */
8271 rc = hmR0VmxLoadGuestExitCtls(pVCpu, pMixedCtx);
8272 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupExitCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8273
8274 rc = hmR0VmxLoadGuestActivityState(pVCpu, pMixedCtx);
8275 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8276
8277 rc = hmR0VmxLoadGuestCR3AndCR4(pVCpu, pMixedCtx);
8278 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestCR3AndCR4: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8279
8280 /* Assumes pMixedCtx->cr0 is up-to-date (strict builds require CR0 for segment register validation checks). */
8281 rc = hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx);
8282 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8283
8284 /* This needs to be done after hmR0VmxLoadGuestEntryCtls() and hmR0VmxLoadGuestExitCtls() as it may alter controls if we
8285 determine we don't have to swap EFER after all. */
8286 rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
8287 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadSharedMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8288
8289 rc = hmR0VmxLoadGuestApicState(pVCpu, pMixedCtx);
8290 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8291
8292 /*
8293 * Loading Rflags here is fine, even though Rflags.TF might depend on guest debug state (which is not loaded here).
8294 * It is re-evaluated and updated if necessary in hmR0VmxLoadSharedState().
8295 */
8296 rc = hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx);
8297 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestRipRspRflags! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8298
8299 /* Clear any unused and reserved bits. */
8300 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);
8301
8302 VMMRZCallRing3Enable(pVCpu);
8303
8304 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
8305 return rc;
8306}
8307
8308
8309/**
8310 * Loads the state shared between the host and guest into the VMCS.
8311 *
8312 * @param pVM Pointer to the VM.
8313 * @param pVCpu Pointer to the VMCPU.
8314 * @param pCtx Pointer to the guest-CPU context.
8315 *
8316 * @remarks No-long-jump zone!!!
8317 */
8318static void hmR0VmxLoadSharedState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
8319{
8320 NOREF(pVM);
8321
8322 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8323 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8324
8325 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
8326 {
8327 int rc = hmR0VmxLoadSharedCR0(pVCpu, pCtx);
8328 AssertRC(rc);
8329 }
8330
8331 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
8332 {
8333 int rc = hmR0VmxLoadSharedDebugState(pVCpu, pCtx);
8334 AssertRC(rc);
8335
8336 /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
8337 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
8338 {
8339 rc = hmR0VmxLoadGuestRflags(pVCpu, pCtx);
8340 AssertRC(rc);
8341 }
8342 }
8343
8344 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS))
8345 {
8346#if HC_ARCH_BITS == 64
8347 if (pVM->hm.s.fAllow64BitGuests)
8348 hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx);
8349#endif
8350 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
8351 }
8352
8353 AssertMsg(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE),
8354 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8355}
8356
8357
8358/**
8359 * Worker for loading the guest-state bits in the inner VT-x execution loop.
8360 *
8361 * @param pVM Pointer to the VM.
8362 * @param pVCpu Pointer to the VMCPU.
8363 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8364 * out-of-sync. Make sure to update the required fields
8365 * before using them.
8366 */
8367DECLINLINE(void) hmR0VmxLoadGuestStateOptimal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
8368{
8369 HMVMX_ASSERT_PREEMPT_SAFE();
8370
8371 Log5(("LoadFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8372#ifdef HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
8373 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
8374#endif
8375
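    /* Fast path: when only RIP has changed since the last VM-entry, reload just RIP rather than the full guest state. */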
8376 if (HMCPU_CF_IS_SET_ONLY(pVCpu, HM_CHANGED_GUEST_RIP))
8377 {
8378 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
8379 AssertRC(rc);
8380 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
8381 }
8382 else if (HMCPU_CF_VALUE(pVCpu))
8383 {
8384 int rc = hmR0VmxLoadGuestState(pVM, pVCpu, pMixedCtx);
8385 AssertRC(rc);
8386 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
8387 }
8388
8389 /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */
8390 AssertMsg( !HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST)
8391 || HMCPU_CF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE),
8392 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8393}
8394
8395
8396/**
8397 * Does the preparations before executing guest code in VT-x.
8398 *
8399 * This may cause longjmps to ring-3 and may even result in rescheduling to the
8400 * recompiler/IEM. We must be cautious about committing guest-state
8401 * information into the VMCS here, since we cannot yet be certain that the
8402 * guest will actually execute in VT-x mode.
8403 *
8404 * If we fall back to the recompiler/IEM after updating the VMCS and clearing
8405 * the common-state (TRPM/forceflags), we must undo those changes so that the
8406 * recompiler/IEM can (and should) use them when it resumes guest execution.
8407 * Otherwise such operations must be done when we can no longer exit to ring-3.
8408 *
8409 * @returns Strict VBox status code.
8410 * @retval VINF_SUCCESS if we can proceed with running the guest, interrupts
8411 * have been disabled.
8412 * @retval VINF_EM_RESET if a triple-fault occurs while injecting a
8413 * double-fault into the guest.
8414 * @retval VINF_EM_DBG_STEPPED if @a fStepping is true and an event was
8415 * dispatched directly.
8416 * @retval VINF_* scheduling changes, we have to go back to ring-3.
8417 *
8418 * @param pVM Pointer to the VM.
8419 * @param pVCpu Pointer to the VMCPU.
8420 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8421 * out-of-sync. Make sure to update the required fields
8422 * before using them.
8423 * @param pVmxTransient Pointer to the VMX transient structure.
8424 * @param fStepping Set if called from hmR0VmxRunGuestCodeStep(). Makes
8425 * us ignore some of the reasons for returning to
8426 * ring-3, and return VINF_EM_DBG_STEPPED if event
8427 * dispatching took place.
8428 */
8429static int hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, bool fStepping)
8430{
8431 Assert(VMMRZCallRing3IsEnabled(pVCpu));
8432
8433#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
8434 PGMRZDynMapFlushAutoSet(pVCpu);
8435#endif
8436
8437 /* Check force flag actions that might require us to go back to ring-3. */
8438 int rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx);
8439 if (rc != VINF_SUCCESS)
8440 return rc;
8441
8442#ifndef IEM_VERIFICATION_MODE_FULL
8443 /* Setup the Virtualized APIC accesses. pMixedCtx->msrApicBase is always up-to-date. It's not part of the VMCS. */
8444 if ( pVCpu->hm.s.vmx.u64MsrApicBase != pMixedCtx->msrApicBase
8445 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
8446 {
8447 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
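        /* The low 12 bits of the IA32_APIC_BASE MSR are flag bits; mask them off to get the page-aligned physical base address. */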
8448 RTGCPHYS GCPhysApicBase;
8449 GCPhysApicBase = pMixedCtx->msrApicBase;
8450 GCPhysApicBase &= PAGE_BASE_GC_MASK;
8451
8452 /* Unalias any existing mapping. */
8453 rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
8454 AssertRCReturn(rc, rc);
8455
8456 /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
8457 Log4(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGv\n", GCPhysApicBase));
8458 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
8459 AssertRCReturn(rc, rc);
8460
8461 pVCpu->hm.s.vmx.u64MsrApicBase = pMixedCtx->msrApicBase;
8462 }
8463#endif /* !IEM_VERIFICATION_MODE_FULL */
8464
8465 if (TRPMHasTrap(pVCpu))
8466 hmR0VmxTrpmTrapToPendingEvent(pVCpu);
8467 else if (!pVCpu->hm.s.Event.fPending)
8468 hmR0VmxEvaluatePendingEvent(pVCpu, pMixedCtx);
8469
8470 /*
8471 * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus needs to be done with
8472 * longjmps or interrupts + preemption enabled. Event injection might also result in triple-faulting the VM.
8473 */
8474 rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx, fStepping);
8475 if (RT_UNLIKELY(rc != VINF_SUCCESS))
8476 {
8477 Assert(rc == VINF_EM_RESET || (rc == VINF_EM_DBG_STEPPED && fStepping));
8478 return rc;
8479 }
8480
8481 /*
8482 * Load the guest state bits, we can handle longjmps/getting preempted here.
8483 *
8484 * If we are injecting events to a real-on-v86 mode guest, we will have to update
8485 * RIP and some segment registers, i.e. hmR0VmxInjectPendingEvent()->hmR0VmxInjectEventVmcs().
8486 * Hence, this needs to be done -after- injection of events.
8487 */
8488 hmR0VmxLoadGuestStateOptimal(pVM, pVCpu, pMixedCtx);
8489
8490 /*
8491 * No longjmps to ring-3 from this point on!!!
8492 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
8493 * This also disables flushing of the R0-logger instance (if any).
8494 */
8495 VMMRZCallRing3Disable(pVCpu);
8496
8497 /*
8498 * We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.)
8499 * when thread-context hooks aren't used and we've been running with preemption disabled for a while.
8500 *
8501 * We need to check for force-flags that could've possibly been altered since we last checked them (e.g.
8502 * by PDMGetInterrupt() leaving the PDM critical section, see @bugref{6398}).
8503 *
8504 * We also check a couple of other force-flags as a last opportunity to get the EMT back to ring-3 before
8505 * executing guest code.
8506 */
8507 pVmxTransient->uEflags = ASMIntDisableFlags();
8508 if ( ( VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
8509 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
8510 && ( !fStepping /* Optimized for the non-stepping case, of course. */
8511 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK & ~(VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT))) )
8512 {
8513 hmR0VmxClearEventVmcs(pVCpu);
8514 ASMSetFlags(pVmxTransient->uEflags);
8515 VMMRZCallRing3Enable(pVCpu);
8516 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
8517 return VINF_EM_RAW_TO_R3;
8518 }
8519
8520 if (RTThreadPreemptIsPending(NIL_RTTHREAD))
8521 {
8522 hmR0VmxClearEventVmcs(pVCpu);
8523 ASMSetFlags(pVmxTransient->uEflags);
8524 VMMRZCallRing3Enable(pVCpu);
8525 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
8526 return VINF_EM_RAW_INTERRUPT;
8527 }
8528
8529 /* We've injected any pending events. This is really the point of no return (to ring-3). */
8530 pVCpu->hm.s.Event.fPending = false;
8531
8532 return VINF_SUCCESS;
8533}
8534
8535
8536/**
8537 * Prepares to run guest code in VT-x and we've committed to doing so. This
8538 * means there is no backing out to ring-3 or anywhere else at this
8539 * point.
8540 *
8541 * @param pVM Pointer to the VM.
8542 * @param pVCpu Pointer to the VMCPU.
8543 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8544 * out-of-sync. Make sure to update the required fields
8545 * before using them.
8546 * @param pVmxTransient Pointer to the VMX transient structure.
8547 *
8548 * @remarks Called with preemption disabled.
8549 * @remarks No-long-jump zone!!!
8550 */
8551static void hmR0VmxPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8552{
8553 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8554 Assert(VMMR0IsLogFlushDisabled(pVCpu));
8555 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8556
8557 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
8558 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC); /* Indicate the start of guest execution. */
8559
8560#ifdef HMVMX_ALWAYS_SWAP_FPU_STATE
8561 if (!CPUMIsGuestFPUStateActive(pVCpu))
8562 CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
8563 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
8564#endif
8565
8566 if ( pVCpu->hm.s.fUseGuestFpu
8567 && !CPUMIsGuestFPUStateActive(pVCpu))
8568 {
8569 CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
8570 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
8571 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
8572 }
8573
8574 /*
8575 * Lazy-update of the host MSRs values in the auto-load/store MSR area.
8576 */
8577 if ( !pVCpu->hm.s.vmx.fUpdatedHostMsrs
8578 && pVCpu->hm.s.vmx.cMsrs > 0)
8579 {
8580 hmR0VmxUpdateAutoLoadStoreHostMsrs(pVCpu);
8581 }
8582
8583 /*
8584 * Load the host state bits as we may've been preempted (only happens when
8585 * thread-context hooks are used or when hmR0VmxSetupVMRunHandler() changes pfnStartVM).
8586 */
8587 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
8588 {
8589 /* This ASSUMES that pfnStartVM has been set up already. */
8590 int rc = hmR0VmxSaveHostState(pVM, pVCpu);
8591 AssertRC(rc);
8592 STAM_COUNTER_INC(&pVCpu->hm.s.StatPreemptSaveHostState);
8593 }
8594 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT));
8595
8596 /*
8597 * Load the state shared between host and guest (FPU, debug, lazy MSRs).
8598 */
8599 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
8600 hmR0VmxLoadSharedState(pVM, pVCpu, pMixedCtx);
8601 AssertMsg(!HMCPU_CF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8602
8603 /* Store status of the shared guest-host state at the time of VM-entry. */
8604#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
8605 if (CPUMIsGuestInLongModeEx(pMixedCtx))
8606 {
8607 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
8608 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
8609 }
8610 else
8611#endif
8612 {
8613 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
8614 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
8615 }
8616 pVmxTransient->fWasGuestFPUStateActive = CPUMIsGuestFPUStateActive(pVCpu);
8617
8618 /*
8619 * Cache the TPR-shadow for checking on every VM-exit if it might have changed.
8620 */
8621 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
8622 pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[0x80];
8623
8624 PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
8625 RTCPUID idCurrentCpu = pCpu->idCpu;
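    /* Re-do TSC offsetting and the VMX-preemption timer if a previous exit requested it or if we've been migrated to a different host CPU (whose TSC may differ). */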
8626 if ( pVmxTransient->fUpdateTscOffsettingAndPreemptTimer
8627 || idCurrentCpu != pVCpu->hm.s.idLastCpu)
8628 {
8629 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu);
8630 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
8631 }
8632
8633 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB-shootdowns, set this across the world switch. */
8634 hmR0VmxFlushTaggedTlb(pVCpu, pCpu); /* Invalidate the appropriate guest entries from the TLB. */
8635 Assert(idCurrentCpu == pVCpu->hm.s.idLastCpu);
8636 pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu; /* Update the error reporting info. with the current host CPU. */
8637
8638 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
8639
8640 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
8641 to start executing. */
8642
8643 /*
8644 * Load the TSC_AUX MSR when we are not intercepting RDTSCP.
8645 */
8646 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
8647 {
8648 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
8649 {
8650 int rc2 = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
8651 AssertRC(rc2);
8652 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS));
8653 bool fMsrUpdated = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMR0GetGuestTscAux(pVCpu),
8654 true /* fUpdateHostMsr */);
8655 Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8656 /* Finally, mark that all host MSR values are updated so we don't redo it without leaving VT-x. See @bugref{6956}. */
8657 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
8658 }
8659 else
8660 {
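            /* RDTSC/RDTSCP cause VM-exits in this configuration, so the guest never reads TSC_AUX directly; drop it from the auto-load/store area. */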
8661 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX);
8662 Assert(!pVCpu->hm.s.vmx.cMsrs || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8663 }
8664 }
8665
8666#ifdef VBOX_STRICT
8667 hmR0VmxCheckAutoLoadStoreMsrs(pVCpu);
8668 hmR0VmxCheckHostEferMsr(pVCpu);
8669 AssertRC(hmR0VmxCheckVmcsCtls(pVCpu));
8670#endif
8671#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
8672 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVM, pVCpu, pMixedCtx);
8673 if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
8674 Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
8675#endif
8676}
8677
8678
8679/**
8680 * Performs some essential restoration of state after running guest code in
8681 * VT-x.
8682 *
8683 * @param pVM Pointer to the VM.
8684 * @param pVCpu Pointer to the VMCPU.
8685 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8686 * out-of-sync. Make sure to update the required fields
8687 * before using them.
8688 * @param pVmxTransient Pointer to the VMX transient structure.
8689 * @param rcVMRun Return code of VMLAUNCH/VMRESUME.
8690 *
8691 * @remarks Called with interrupts disabled, and returns with interrupts enabled!
8692 *
8693 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
8694 * unconditionally when it is safe to do so.
8695 */
8696static void hmR0VmxPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, int rcVMRun)
8697{
8698 NOREF(pVM);
8699
8700 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8701
8702 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
8703 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
8704 HMVMXCPU_GST_RESET_TO(pVCpu, 0); /* Exits/longjmps to ring-3 requires saving the guest state. */
8705 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */
8706 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
8707 pVmxTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */
8708
8709 /** @todo Last-seen-tick shouldn't be necessary when TM supports invariant
8710 * mode. */
8711 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
8712 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVCpu->hm.s.vmx.u64TSCOffset);
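    /* The value handed to TM is the guest's view of the TSC (host TSC plus the VMCS TSC offset);
       presumably TM uses this last-seen tick to keep the virtual TSC monotonic across exits. */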
8713
8714 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
8715 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
8716 Assert(!(ASMGetFlags() & X86_EFL_IF));
8717 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
8718
8719#ifdef HMVMX_ALWAYS_SWAP_FPU_STATE
8720 if (CPUMIsGuestFPUStateActive(pVCpu))
8721 {
8722 hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8723 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
8724 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
8725 }
8726#endif
8727
8728#if HC_ARCH_BITS == 64
8729 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_REQUIRED; /* Host state messed up by VT-x, we must restore. */
8730#endif
8731 pVCpu->hm.s.vmx.uVmcsState |= HMVMX_VMCS_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */
8732#ifdef VBOX_STRICT
8733 hmR0VmxCheckHostEferMsr(pVCpu); /* Verify that VMRUN/VMLAUNCH didn't modify host EFER. */
8734#endif
8735 ASMSetFlags(pVmxTransient->uEflags); /* Enable interrupts. */
8736 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
8737
8738 /* Save the basic VM-exit reason. See Intel spec. 24.9.1 "Basic VM-exit Information". */
8739 uint32_t uExitReason;
8740 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
8741 rc |= hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
8742 AssertRC(rc);
8743 pVmxTransient->uExitReason = (uint16_t)VMX_EXIT_REASON_BASIC(uExitReason);
8744 pVmxTransient->fVMEntryFailed = VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uEntryIntInfo);
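    /* The CPU clears the valid bit of the VM-entry interruption-information field when the VM-entry
       succeeds, so a valid bit that is still set after the run indicates that the VM-entry itself
       failed; that is what fVMEntryFailed records here. */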
8745
8746 /* Update the VM-exit history array. */
8747 HMCPU_EXIT_HISTORY_ADD(pVCpu, pVmxTransient->uExitReason);
8748
8749 /* If the VMLAUNCH/VMRESUME failed, we can bail out early. This does -not- cover VMX_EXIT_ERR_*. */
8750 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
8751 {
8752 Log4(("VM-entry failure: pVCpu=%p idCpu=%RU32 rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", pVCpu, pVCpu->idCpu, rcVMRun,
8753 pVmxTransient->fVMEntryFailed));
8754 return;
8755 }
8756
8757 if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
8758 {
8759 /** @todo We can optimize this by only syncing with our force-flags when
8760 * really needed and keeping the VMCS state as it is for most
8761 * VM-exits. */
8762 /* Update the guest interruptibility-state from the VMCS. */
8763 hmR0VmxSaveGuestIntrState(pVCpu, pMixedCtx);
8764
8765#if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
8766 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8767 AssertRC(rc);
8768#elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
8769 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
8770 AssertRC(rc);
8771#endif
8772
8773 /*
8774 * If the TPR was raised by the guest, it wouldn't cause a VM-exit immediately. Instead we sync the TPR lazily whenever
8775 * we eventually get a VM-exit for any reason. This may be expensive as PDMApicSetTPR() can longjmp to ring-3, which is
8776 * why it's done here: it's easier and no less efficient to handle it here than to make hmR0VmxSaveGuestState()
8777 * cope with longjmps safely (see VMCPU_FF_HM_UPDATE_CR3 handling).
8778 */
8779 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
8780 && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[0x80])
8781 {
8782 rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
8783 AssertRC(rc);
8784 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
8785 }
8786 }
8787}
8788
8789
8790/**
8791 * Runs the guest code using VT-x the normal way.
8792 *
8793 * @returns VBox status code.
8794 * @param pVM Pointer to the VM.
8795 * @param pVCpu Pointer to the VMCPU.
8796 * @param pCtx Pointer to the guest-CPU context.
8797 *
8798 * @note Mostly the same as hmR0VmxRunGuestCodeStep().
8799 */
8800static int hmR0VmxRunGuestCodeNormal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
8801{
8802 VMXTRANSIENT VmxTransient;
8803 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
8804 int rc = VERR_INTERNAL_ERROR_5;
8805 uint32_t cLoops = 0;
8806
8807 for (;; cLoops++)
8808 {
8809 Assert(!HMR0SuspendPending());
8810 HMVMX_ASSERT_CPU_SAFE();
8811
8812 /* Preparatory work for running guest code; this may force us to return
8813 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
8814 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
8815 rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient, false /* fStepping */);
8816 if (rc != VINF_SUCCESS)
8817 break;
8818
8819 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
8820 rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
8821 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
8822
8823 /* Restore any residual host-state and save any bits shared between host
8824 and guest into the guest-CPU state. Re-enables interrupts! */
8825 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
8826
8827 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
8828 if (RT_UNLIKELY(rc != VINF_SUCCESS))
8829 {
8830 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
8831 hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
8832 return rc;
8833 }
8834
8835 /* Handle the VM-exit. */
8836 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
8837 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
8838 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
8839 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
8840 HMVMX_START_EXIT_DISPATCH_PROF();
8841#ifdef HMVMX_USE_FUNCTION_TABLE
8842 rc = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
8843#else
8844 rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
8845#endif
8846 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
8847 if (rc != VINF_SUCCESS)
8848 break;
8849 else if (cLoops > pVM->hm.s.cMaxResumeLoops)
8850 {
8851 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
8852 rc = VINF_EM_RAW_INTERRUPT;
8853 break;
8854 }
8855 }
8856
8857 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
8858 return rc;
8859}
8860
8861
8862/**
8863 * Single-steps guest code using VT-x.
8864 *
8865 * @returns VBox status code.
8866 * @param pVM Pointer to the VM.
8867 * @param pVCpu Pointer to the VMCPU.
8868 * @param pCtx Pointer to the guest-CPU context.
8869 *
8870 * @note Mostly the same as hmR0VmxRunGuestCodeNormal().
8871 */
8872static int hmR0VmxRunGuestCodeStep(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
8873{
8874 VMXTRANSIENT VmxTransient;
8875 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
8876 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
8877 uint32_t cLoops = 0;
8878 uint16_t uCsStart = pCtx->cs.Sel;
8879 uint64_t uRipStart = pCtx->rip;
8880
8881 for (;; cLoops++)
8882 {
8883 Assert(!HMR0SuspendPending());
8884 HMVMX_ASSERT_CPU_SAFE();
8885
8886 /* Preparatory work for running guest code; this may force us to return
8887 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
8888 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
8889 rcStrict = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient, true /* fStepping */);
8890 if (rcStrict != VINF_SUCCESS)
8891 break;
8892
8893 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
8894 rcStrict = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
8895 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
8896
8897 /* Restore any residual host-state and save any bits shared between host
8898 and guest into the guest-CPU state. Re-enables interrupts! */
8899 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, VBOXSTRICTRC_TODO(rcStrict));
8900
8901 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
8902 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
8903 {
8904 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
8905 hmR0VmxReportWorldSwitchError(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict), pCtx, &VmxTransient);
8906 return VBOXSTRICTRC_TODO(rcStrict);
8907 }
8908
8909 /* Handle the VM-exit - we quit earlier on certain VM-exits, see hmR0VmxHandleExitStep(). */
8910 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
8911 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
8912 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
8913 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
8914 HMVMX_START_EXIT_DISPATCH_PROF();
8915 rcStrict = hmR0VmxHandleExitStep(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason, uCsStart, uRipStart);
8916 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
8917 if (rcStrict != VINF_SUCCESS)
8918 break;
8919 if (cLoops > pVM->hm.s.cMaxResumeLoops)
8920 {
8921 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
8922 rcStrict = VINF_EM_RAW_INTERRUPT;
8923 break;
8924 }
8925
8926 /*
8927 * Did the RIP change? If so, consider it a single step.
8928 * Otherwise, make sure one of the TFs gets set.
8929 */
8930 int rc2 = hmR0VmxSaveGuestRip(pVCpu, pCtx);
8931 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pCtx);
8932 AssertRCReturn(rc2, rc2);
8933 if ( pCtx->rip != uRipStart
8934 || pCtx->cs.Sel != uCsStart)
8935 {
8936 rcStrict = VINF_EM_DBG_STEPPED;
8937 break;
8938 }
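            /* Same CS:RIP as when we started: mark the debug state dirty so the trap flag is set up
               again for the next iteration and the guest keeps single-stepping (see the comment above
               about making sure one of the TFs gets set). */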
8939 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
8940 }
8941
8942 /*
8943 * Clear the X86_EFL_TF if necessary.
8944 */
8945 if (pVCpu->hm.s.fClearTrapFlag)
8946 {
8947 int rc2 = hmR0VmxSaveGuestRflags(pVCpu, pCtx);
8948 AssertRCReturn(rc2, rc2);
8949 pVCpu->hm.s.fClearTrapFlag = false;
8950 pCtx->eflags.Bits.u1TF = 0;
8951 }
8952 /** @todo there seem to be issues with the resume flag when the monitor trap
8953 * flag is pending without being used. Seen early in BIOS init when
8954 * accessing the APIC page in protected mode. */
8955
8956 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
8957 return VBOXSTRICTRC_TODO(rcStrict);
8958}
8959
8960
8961/**
8962 * Runs the guest code using VT-x.
8963 *
8964 * @returns VBox status code.
8965 * @param pVM Pointer to the VM.
8966 * @param pVCpu Pointer to the VMCPU.
8967 * @param pCtx Pointer to the guest-CPU context.
8968 */
8969VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
8970{
8971 Assert(VMMRZCallRing3IsEnabled(pVCpu));
8972 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
8973 HMVMX_ASSERT_PREEMPT_SAFE();
8974
8975 VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pCtx);
8976
8977 int rc;
8978 if (!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu))
8979 rc = hmR0VmxRunGuestCodeNormal(pVM, pVCpu, pCtx);
8980 else
8981 rc = hmR0VmxRunGuestCodeStep(pVM, pVCpu, pCtx);
8982
8983 if (rc == VERR_EM_INTERPRETER)
8984 rc = VINF_EM_RAW_EMULATE_INSTR;
8985 else if (rc == VINF_EM_RESET)
8986 rc = VINF_EM_TRIPLE_FAULT;
8987
8988 int rc2 = hmR0VmxExitToRing3(pVM, pVCpu, pCtx, rc);
8989 if (RT_FAILURE(rc2))
8990 {
8991 pVCpu->hm.s.u32HMError = rc;
8992 rc = rc2;
8993 }
8994 Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
8995 return rc;
8996}
8997
8998
8999#ifndef HMVMX_USE_FUNCTION_TABLE
9000DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
9001{
9002#ifdef DEBUG_ramshankar
9003# define SVVMCS() do { int rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); AssertRC(rc2); } while (0)
9004# define LDVMCS() do { HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); } while (0)
9005#endif
9006 int rc;
9007 switch (rcReason)
9008 {
9009 case VMX_EXIT_EPT_MISCONFIG: /* SVVMCS(); */ rc = hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9010 case VMX_EXIT_EPT_VIOLATION: /* SVVMCS(); */ rc = hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9011 case VMX_EXIT_IO_INSTR: /* SVVMCS(); */ rc = hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9012 case VMX_EXIT_CPUID: /* SVVMCS(); */ rc = hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9013 case VMX_EXIT_RDTSC: /* SVVMCS(); */ rc = hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9014 case VMX_EXIT_RDTSCP: /* SVVMCS(); */ rc = hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9015 case VMX_EXIT_APIC_ACCESS: /* SVVMCS(); */ rc = hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9016 case VMX_EXIT_XCPT_OR_NMI: /* SVVMCS(); */ rc = hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9017 case VMX_EXIT_MOV_CRX: /* SVVMCS(); */ rc = hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9018 case VMX_EXIT_EXT_INT: /* SVVMCS(); */ rc = hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9019 case VMX_EXIT_INT_WINDOW: /* SVVMCS(); */ rc = hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9020 case VMX_EXIT_MWAIT: /* SVVMCS(); */ rc = hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9021 case VMX_EXIT_MONITOR: /* SVVMCS(); */ rc = hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9022 case VMX_EXIT_TASK_SWITCH: /* SVVMCS(); */ rc = hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9023 case VMX_EXIT_PREEMPT_TIMER: /* SVVMCS(); */ rc = hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9024 case VMX_EXIT_RDMSR: /* SVVMCS(); */ rc = hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9025 case VMX_EXIT_WRMSR: /* SVVMCS(); */ rc = hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9026 case VMX_EXIT_MOV_DRX: /* SVVMCS(); */ rc = hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9027 case VMX_EXIT_TPR_BELOW_THRESHOLD: /* SVVMCS(); */ rc = hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9028 case VMX_EXIT_HLT: /* SVVMCS(); */ rc = hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9029 case VMX_EXIT_INVD: /* SVVMCS(); */ rc = hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9030 case VMX_EXIT_INVLPG: /* SVVMCS(); */ rc = hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9031 case VMX_EXIT_RSM: /* SVVMCS(); */ rc = hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9032 case VMX_EXIT_MTF: /* SVVMCS(); */ rc = hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9033 case VMX_EXIT_PAUSE: /* SVVMCS(); */ rc = hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9034 case VMX_EXIT_XDTR_ACCESS: /* SVVMCS(); */ rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9035 case VMX_EXIT_TR_ACCESS: /* SVVMCS(); */ rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9036 case VMX_EXIT_WBINVD: /* SVVMCS(); */ rc = hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9037 case VMX_EXIT_XSETBV: /* SVVMCS(); */ rc = hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9038 case VMX_EXIT_RDRAND: /* SVVMCS(); */ rc = hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9039 case VMX_EXIT_INVPCID: /* SVVMCS(); */ rc = hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9040 case VMX_EXIT_GETSEC: /* SVVMCS(); */ rc = hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9041 case VMX_EXIT_RDPMC: /* SVVMCS(); */ rc = hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9042 case VMX_EXIT_VMCALL: /* SVVMCS(); */ rc = hmR0VmxExitVmcall(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9043
9044 case VMX_EXIT_TRIPLE_FAULT: rc = hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient); break;
9045 case VMX_EXIT_NMI_WINDOW: rc = hmR0VmxExitNmiWindow(pVCpu, pMixedCtx, pVmxTransient); break;
9046 case VMX_EXIT_INIT_SIGNAL: rc = hmR0VmxExitInitSignal(pVCpu, pMixedCtx, pVmxTransient); break;
9047 case VMX_EXIT_SIPI: rc = hmR0VmxExitSipi(pVCpu, pMixedCtx, pVmxTransient); break;
9048 case VMX_EXIT_IO_SMI: rc = hmR0VmxExitIoSmi(pVCpu, pMixedCtx, pVmxTransient); break;
9049 case VMX_EXIT_SMI: rc = hmR0VmxExitSmi(pVCpu, pMixedCtx, pVmxTransient); break;
9050 case VMX_EXIT_ERR_MSR_LOAD: rc = hmR0VmxExitErrMsrLoad(pVCpu, pMixedCtx, pVmxTransient); break;
9051 case VMX_EXIT_ERR_INVALID_GUEST_STATE: rc = hmR0VmxExitErrInvalidGuestState(pVCpu, pMixedCtx, pVmxTransient); break;
9052 case VMX_EXIT_ERR_MACHINE_CHECK: rc = hmR0VmxExitErrMachineCheck(pVCpu, pMixedCtx, pVmxTransient); break;
9053
9054 case VMX_EXIT_VMCLEAR:
9055 case VMX_EXIT_VMLAUNCH:
9056 case VMX_EXIT_VMPTRLD:
9057 case VMX_EXIT_VMPTRST:
9058 case VMX_EXIT_VMREAD:
9059 case VMX_EXIT_VMRESUME:
9060 case VMX_EXIT_VMWRITE:
9061 case VMX_EXIT_VMXOFF:
9062 case VMX_EXIT_VMXON:
9063 case VMX_EXIT_INVEPT:
9064 case VMX_EXIT_INVVPID:
9065 case VMX_EXIT_VMFUNC:
9066 rc = hmR0VmxExitSetPendingXcptUD(pVCpu, pMixedCtx, pVmxTransient);
9067 break;
9068 default:
9069 rc = hmR0VmxExitErrUndefined(pVCpu, pMixedCtx, pVmxTransient);
9070 break;
9071 }
9072 return rc;
9073}
9074#endif /* !HMVMX_USE_FUNCTION_TABLE */
9075
9076
9077/**
9078 * Single-stepping VM-exit filtering.
9079 *
9080 * This preprocesses VM-exits and decides whether we've gotten far enough
9081 * to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit handling is
9082 * performed.
9083 *
9084 * @returns Strict VBox status code.
9085 * @param pVCpu The virtual CPU of the calling EMT.
9086 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
9087 * out-of-sync. Make sure to update the required
9088 * fields before using them.
9089 * @param pVmxTransient Pointer to the VMX-transient structure.
9090 * @param uExitReason The VM-exit reason.
9091 */
9092DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitStep(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
9093 uint32_t uExitReason, uint16_t uCsStart, uint64_t uRipStart)
9094{
9095 switch (uExitReason)
9096 {
9097 case VMX_EXIT_XCPT_OR_NMI:
9098 {
9099 /* Check for host NMI. */
9100 int rc2 = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
9101 AssertRCReturn(rc2, rc2);
9102 uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo);
9103 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
9104 return hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient);
9105 /* fall thru */
9106 }
9107
9108 case VMX_EXIT_EPT_MISCONFIG:
9109 case VMX_EXIT_TRIPLE_FAULT:
9110 case VMX_EXIT_APIC_ACCESS:
9111 case VMX_EXIT_TPR_BELOW_THRESHOLD:
9112 case VMX_EXIT_TASK_SWITCH:
9113
9114 /* Instruction specific VM-exits: */
9115 case VMX_EXIT_IO_INSTR:
9116 case VMX_EXIT_CPUID:
9117 case VMX_EXIT_RDTSC:
9118 case VMX_EXIT_RDTSCP:
9119 case VMX_EXIT_MOV_CRX:
9120 case VMX_EXIT_MWAIT:
9121 case VMX_EXIT_MONITOR:
9122 case VMX_EXIT_RDMSR:
9123 case VMX_EXIT_WRMSR:
9124 case VMX_EXIT_MOV_DRX:
9125 case VMX_EXIT_HLT:
9126 case VMX_EXIT_INVD:
9127 case VMX_EXIT_INVLPG:
9128 case VMX_EXIT_RSM:
9129 case VMX_EXIT_PAUSE:
9130 case VMX_EXIT_XDTR_ACCESS:
9131 case VMX_EXIT_TR_ACCESS:
9132 case VMX_EXIT_WBINVD:
9133 case VMX_EXIT_XSETBV:
9134 case VMX_EXIT_RDRAND:
9135 case VMX_EXIT_INVPCID:
9136 case VMX_EXIT_GETSEC:
9137 case VMX_EXIT_RDPMC:
9138 case VMX_EXIT_VMCALL:
9139 case VMX_EXIT_VMCLEAR:
9140 case VMX_EXIT_VMLAUNCH:
9141 case VMX_EXIT_VMPTRLD:
9142 case VMX_EXIT_VMPTRST:
9143 case VMX_EXIT_VMREAD:
9144 case VMX_EXIT_VMRESUME:
9145 case VMX_EXIT_VMWRITE:
9146 case VMX_EXIT_VMXOFF:
9147 case VMX_EXIT_VMXON:
9148 case VMX_EXIT_INVEPT:
9149 case VMX_EXIT_INVVPID:
9150 case VMX_EXIT_VMFUNC:
9151 {
9152 int rc2 = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
9153 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
9154 AssertRCReturn(rc2, rc2);
9155 if ( pMixedCtx->rip != uRipStart
9156 || pMixedCtx->cs.Sel != uCsStart)
9157 return VINF_EM_DBG_STEPPED;
9158 break;
9159 }
9160 }
9161
9162 /*
9163 * Normal processing.
9164 */
9165#ifdef HMVMX_USE_FUNCTION_TABLE
9166 return g_apfnVMExitHandlers[uExitReason](pVCpu, pMixedCtx, pVmxTransient);
9167#else
9168 return hmR0VmxHandleExit(pVCpu, pMixedCtx, pVmxTransient, uExitReason);
9169#endif
9170}
9171
9172
9173#ifdef DEBUG
9174 /* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
9175# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
9176 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
9177
9178# define HMVMX_ASSERT_PREEMPT_CPUID() \
9179 do \
9180 { \
9181 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
9182 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
9183 } while (0)
9184
9185# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \
9186 do { \
9187 AssertPtr(pVCpu); \
9188 AssertPtr(pMixedCtx); \
9189 AssertPtr(pVmxTransient); \
9190 Assert(pVmxTransient->fVMEntryFailed == false); \
9191 Assert(ASMIntAreEnabled()); \
9192 HMVMX_ASSERT_PREEMPT_SAFE(); \
9193 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
9194 Log4Func(("vcpu[%RU32] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v\n", pVCpu->idCpu)); \
9195 HMVMX_ASSERT_PREEMPT_SAFE(); \
9196 if (VMMR0IsLogFlushDisabled(pVCpu)) \
9197 HMVMX_ASSERT_PREEMPT_CPUID(); \
9198 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
9199 } while (0)
9200
9201# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() \
9202 do { \
9203 Log4Func(("\n")); \
9204 } while (0)
9205#else /* Release builds */
9206# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \
9207 do { \
9208 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
9209 NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient); \
9210 } while (0)
9211# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while (0)
9212#endif
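/* For reference, a typical VM-exit handler below starts by invoking the validation macro, e.g.
   (sketch mirroring hmR0VmxExitExtInt() further down; hmR0VmxExitFoo is a made-up name for
   illustration only):
       HMVMX_EXIT_DECL hmR0VmxExitFoo(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
       {
           HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
           ...
       }
 */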
9213
9214
9215/**
9216 * Advances the guest RIP after reading it from the VMCS.
9217 *
9218 * @returns VBox status code.
9219 * @param pVCpu Pointer to the VMCPU.
9220 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
9221 * out-of-sync. Make sure to update the required fields
9222 * before using them.
9223 * @param pVmxTransient Pointer to the VMX transient structure.
9224 *
9225 * @remarks No-long-jump zone!!!
9226 */
9227DECLINLINE(int) hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9228{
9229 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
9230 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
9231 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
9232 AssertRCReturn(rc, rc);
9233
9234 pMixedCtx->rip += pVmxTransient->cbInstr;
9235 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
9236
9237 /*
9238 * Deliver a debug exception to the guest if it is single-stepping. Don't directly inject a #DB but use the
9239 * pending debug exception field as it takes care of priority of events.
9240 *
9241 * See Intel spec. 32.2.1 "Debug Exceptions".
9242 */
9243 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
9244
9245 return rc;
9246}
9247
9248
9249/**
9250 * Tries to determine what part of the guest-state VT-x has deemed as invalid
9251 * and update error record fields accordingly.
9252 *
9253 * @return VMX_IGS_* return codes.
9254 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
9255 * wrong with the guest state.
9256 *
9257 * @param pVM Pointer to the VM.
9258 * @param pVCpu Pointer to the VMCPU.
9259 * @param pCtx Pointer to the guest-CPU state.
9260 *
9261 * @remarks This function assumes our cache of the VMCS controls
9262 * is valid, i.e. hmR0VmxCheckVmcsCtls() succeeded.
9263 */
9264static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
9265{
9266#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
9267#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { \
9268 uError = (err); \
9269 break; \
9270 } else do { } while (0)
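/* Note: the trailing 'else do { } while (0)' lets HMVMX_CHECK_BREAK() be followed by a semicolon and
   still nest safely inside if/else statements, in the spirit of the usual do-while(0) macro idiom:
       if (fSomething)
           HMVMX_CHECK_BREAK(fCond, VMX_IGS_ERROR);
       else
           ...
   (fSomething and fCond are made-up names for illustration only.) */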
9271
9272 int rc;
9273 uint32_t uError = VMX_IGS_ERROR;
9274 uint32_t u32Val;
9275 bool fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuest;
9276
9277 do
9278 {
9279 /*
9280 * CR0.
9281 */
9282 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
9283 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
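        /* The IA32_VMX_CR0_FIXED0/FIXED1 MSRs encode the allowed CR0 settings: a bit set in FIXED0
           must be 1 in guest CR0 and a bit clear in FIXED1 must be 0. Hence:
               uSetCR0 = FIXED0 & FIXED1  ->  bits that must be 1, checked as (CR0 & uSetCR0) == uSetCR0
               uZapCR0 = FIXED0 | FIXED1  ->  bits that may be 1,  checked as (CR0 & ~uZapCR0) == 0
           matching the two HMVMX_CHECK_BREAKs below. */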
9284 /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG).
9285 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
9286 if (fUnrestrictedGuest)
9287 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
9288
9289 uint32_t u32GuestCR0;
9290 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32GuestCR0);
9291 AssertRCBreak(rc);
9292 HMVMX_CHECK_BREAK((u32GuestCR0 & uSetCR0) == uSetCR0, VMX_IGS_CR0_FIXED1);
9293 HMVMX_CHECK_BREAK(!(u32GuestCR0 & ~uZapCR0), VMX_IGS_CR0_FIXED0);
9294 if ( !fUnrestrictedGuest
9295 && (u32GuestCR0 & X86_CR0_PG)
9296 && !(u32GuestCR0 & X86_CR0_PE))
9297 {
9298 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
9299 }
9300
9301 /*
9302 * CR4.
9303 */
9304 uint64_t uSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
9305 uint64_t uZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
9306
9307 uint32_t u32GuestCR4;
9308 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32GuestCR4);
9309 AssertRCBreak(rc);
9310 HMVMX_CHECK_BREAK((u32GuestCR4 & uSetCR4) == uSetCR4, VMX_IGS_CR4_FIXED1);
9311 HMVMX_CHECK_BREAK(!(u32GuestCR4 & ~uZapCR4), VMX_IGS_CR4_FIXED0);
9312
9313 /*
9314 * IA32_DEBUGCTL MSR.
9315 */
9316 uint64_t u64Val;
9317 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
9318 AssertRCBreak(rc);
9319 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
9320 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
9321 {
9322 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
9323 }
9324 uint64_t u64DebugCtlMsr = u64Val;
9325
9326#ifdef VBOX_STRICT
9327 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
9328 AssertRCBreak(rc);
9329 Assert(u32Val == pVCpu->hm.s.vmx.u32EntryCtls);
9330#endif
9331 bool const fLongModeGuest = RT_BOOL(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST);
9332
9333 /*
9334 * RIP and RFLAGS.
9335 */
9336 uint32_t u32Eflags;
9337#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9338 if (HMVMX_IS_64BIT_HOST_MODE())
9339 {
9340 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64Val);
9341 AssertRCBreak(rc);
9342 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
9343 if ( !fLongModeGuest
9344 || !pCtx->cs.Attr.n.u1Long)
9345 {
9346 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
9347 }
9348 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
9349 * must be identical if the "IA-32e mode guest" VM-entry
9350 * control is 1 and CS.L is 1. No check applies if the
9351 * CPU supports 64 linear-address bits. */
9352
9353 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
9354 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RFLAGS, &u64Val);
9355 AssertRCBreak(rc);
9356 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
9357 VMX_IGS_RFLAGS_RESERVED);
9358 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
9359 u32Eflags = u64Val;
9360 }
9361 else
9362#endif
9363 {
9364 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Eflags);
9365 AssertRCBreak(rc);
9366 HMVMX_CHECK_BREAK(!(u32Eflags & 0xffc08028), VMX_IGS_RFLAGS_RESERVED); /* Bit 31:22, Bit 15, 5, 3 MBZ. */
9367 HMVMX_CHECK_BREAK((u32Eflags & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
9368 }
9369
9370 if ( fLongModeGuest
9371 || ( fUnrestrictedGuest
9372 && !(u32GuestCR0 & X86_CR0_PE)))
9373 {
9374 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
9375 }
9376
9377 uint32_t u32EntryInfo;
9378 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
9379 AssertRCBreak(rc);
9380 if ( VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo)
9381 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
9382 {
9383 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
9384 }
9385
9386 /*
9387 * 64-bit checks.
9388 */
9389#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9390 if (HMVMX_IS_64BIT_HOST_MODE())
9391 {
9392 if ( fLongModeGuest
9393 && !fUnrestrictedGuest)
9394 {
9395 HMVMX_CHECK_BREAK(u32GuestCR0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
9396 HMVMX_CHECK_BREAK(u32GuestCR4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
9397 }
9398
9399 if ( !fLongModeGuest
9400 && (u32GuestCR4 & X86_CR4_PCIDE))
9401 {
9402 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
9403 }
9404
9405 /** @todo CR3 field must be such that bits 63:52 and bits in the range
9406 * 51:32 beyond the processor's physical-address width are 0. */
9407
9408 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
9409 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
9410 {
9411 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
9412 }
9413
9414 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
9415 AssertRCBreak(rc);
9416 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
9417
9418 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
9419 AssertRCBreak(rc);
9420 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
9421 }
9422#endif
9423
9424 /*
9425 * PERF_GLOBAL MSR.
9426 */
9427 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR)
9428 {
9429 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
9430 AssertRCBreak(rc);
9431 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
9432 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
9433 }
9434
9435 /*
9436 * PAT MSR.
9437 */
9438 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR)
9439 {
9440 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
9441 AssertRCBreak(rc);
9442 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
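            /* Each of the 8 PAT entries occupies one byte; the loop below walks them from the lowest
               byte up and rejects any encoding other than the architecturally defined memory types
               listed in the comments. */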
9443 for (unsigned i = 0; i < 8; i++)
9444 {
9445 uint8_t u8Val = (u64Val & 0xff);
9446 if ( u8Val != 0 /* UC */
9447 && u8Val != 1 /* WC */
9448 && u8Val != 4 /* WT */
9449 && u8Val != 5 /* WP */
9450 && u8Val != 6 /* WB */
9451 && u8Val != 7 /* UC- */)
9452 {
9453 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
9454 }
9455 u64Val >>= 8;
9456 }
9457 }
9458
9459 /*
9460 * EFER MSR.
9461 */
9462 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
9463 {
9464 Assert(pVM->hm.s.vmx.fSupportsVmcsEfer);
9465 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
9466 AssertRCBreak(rc);
9467 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
9468 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
9469 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST),
9470 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
9471 HMVMX_CHECK_BREAK( fUnrestrictedGuest
9472 || !(u32GuestCR0 & X86_CR0_PG)
9473 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
9474 VMX_IGS_EFER_LMA_LME_MISMATCH);
9475 }
9476
9477 /*
9478 * Segment registers.
9479 */
9480 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9481 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
9482 if (!(u32Eflags & X86_EFL_VM))
9483 {
9484 /* CS */
9485 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
9486 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
9487 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
9488 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
9489 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
9490 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
9491 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
9492 /* CS cannot be loaded with NULL in protected mode. */
9493 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
9494 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
9495 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
9496 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
9497 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
9498 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
9499 else if (pVM->hm.s.vmx.fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
9500 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
9501 else
9502 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
9503
9504 /* SS */
9505 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9506 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
9507 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
9508 if ( !(pCtx->cr0 & X86_CR0_PE)
9509 || pCtx->cs.Attr.n.u4Type == 3)
9510 {
9511 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
9512 }
9513 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
9514 {
9515 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
9516 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
9517 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
9518 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
9519 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
9520 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
9521 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
9522 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
9523 }
9524
9525 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
9526 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
9527 {
9528 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
9529 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
9530 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9531 || pCtx->ds.Attr.n.u4Type > 11
9532 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
9533 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
9534 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
9535 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
9536 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
9537 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
9538 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
9539 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9540 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
9541 }
9542 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
9543 {
9544 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
9545 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
9546 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9547 || pCtx->es.Attr.n.u4Type > 11
9548 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
9549 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
9550 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
9551 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
9552 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
9553 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
9554 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
9555 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9556 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
9557 }
9558 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
9559 {
9560 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
9561 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
9562 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9563 || pCtx->fs.Attr.n.u4Type > 11
9564 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
9565 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
9566 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
9567 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
9568 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
9569 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
9570 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
9571 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9572 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
9573 }
9574 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
9575 {
9576 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
9577 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
9578 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9579 || pCtx->gs.Attr.n.u4Type > 11
9580 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
9581 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
9582 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
9583 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
9584 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
9585 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
9586 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
9587 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9588 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
9589 }
9590 /* 64-bit capable CPUs. */
9591#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9592 if (HMVMX_IS_64BIT_HOST_MODE())
9593 {
9594 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
9595 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
9596 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9597 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
9598 HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
9599 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
9600 VMX_IGS_LONGMODE_SS_BASE_INVALID);
9601 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
9602 VMX_IGS_LONGMODE_DS_BASE_INVALID);
9603 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
9604 VMX_IGS_LONGMODE_ES_BASE_INVALID);
9605 }
9606#endif
9607 }
9608 else
9609 {
9610 /* V86 mode checks. */
9611 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
9612 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
9613 {
9614 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
9615 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
9616 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
9617 }
9618 else
9619 {
9620 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
9621 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
9622 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
9623 }
9624
9625 /* CS */
9626 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
9627 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
9628 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
9629 /* SS */
9630 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
9631 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
9632 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
9633 /* DS */
9634 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
9635 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
9636 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
9637 /* ES */
9638 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
9639 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
9640 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
9641 /* FS */
9642 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
9643 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
9644 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
9645 /* GS */
9646 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
9647 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
9648 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
9649 /* 64-bit capable CPUs. */
9650#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9651 if (HMVMX_IS_64BIT_HOST_MODE())
9652 {
9653 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
9654 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
9655 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9656 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
9657 HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
9658 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
9659 VMX_IGS_LONGMODE_SS_BASE_INVALID);
9660 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
9661 VMX_IGS_LONGMODE_DS_BASE_INVALID);
9662 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
9663 VMX_IGS_LONGMODE_ES_BASE_INVALID);
9664 }
9665#endif
9666 }
9667
9668 /*
9669 * TR.
9670 */
9671 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
9672 /* 64-bit capable CPUs. */
9673#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9674 if (HMVMX_IS_64BIT_HOST_MODE())
9675 {
9676 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
9677 }
9678#endif
9679 if (fLongModeGuest)
9680 {
9681 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
9682 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
9683 }
9684 else
9685 {
9686 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
9687 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
9688 VMX_IGS_TR_ATTR_TYPE_INVALID);
9689 }
9690 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
9691 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
9692 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
9693 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
9694 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
9695 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
9696 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
9697 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
9698
9699 /*
9700 * GDTR and IDTR.
9701 */
9702#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9703 if (HMVMX_IS_64BIT_HOST_MODE())
9704 {
9705 rc = VMXReadVmcs64(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
9706 AssertRCBreak(rc);
9707 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
9708
9709 rc = VMXReadVmcs64(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
9710 AssertRCBreak(rc);
9711 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
9712 }
9713#endif
9714
9715 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
9716 AssertRCBreak(rc);
9717 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
9718
9719 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
9720 AssertRCBreak(rc);
9721 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
9722
9723 /*
9724 * Guest Non-Register State.
9725 */
9726 /* Activity State. */
9727 uint32_t u32ActivityState;
9728 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
9729 AssertRCBreak(rc);
9730 HMVMX_CHECK_BREAK( !u32ActivityState
9731 || (u32ActivityState & MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hm.s.vmx.Msrs.u64Misc)),
9732 VMX_IGS_ACTIVITY_STATE_INVALID);
9733 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
9734 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
9735 uint32_t u32IntrState;
9736 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &u32IntrState);
9737 AssertRCBreak(rc);
9738 if ( u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS
9739 || u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
9740 {
9741 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
9742 }
9743
9744 /** @todo Activity state and injecting interrupts. Left as a todo since we
9745 * currently don't use any activity state other than ACTIVE. */
9746
9747 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
9748 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
9749
9750 /* Guest interruptibility-state. */
9751 HMVMX_CHECK_BREAK(!(u32IntrState & 0xfffffff0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
9752 HMVMX_CHECK_BREAK((u32IntrState & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
9753 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS))
9754 != ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
9755 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
9756 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
9757 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
9758 || !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI),
9759 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
9760 if (VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo))
9761 {
9762 if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
9763 {
9764 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
9765 && !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
9766 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
9767 }
9768 else if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
9769 {
9770 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
9771 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
9772 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI),
9773 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
9774 }
9775 }
9776 /** @todo Assumes the processor is not in SMM. */
9777 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI),
9778 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
9779 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
9780 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI),
9781 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
9782 if ( (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
9783 && VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo)
9784 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
9785 {
9786 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI),
9787 VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
9788 }
9789
9790 /* Pending debug exceptions. */
9791 if (HMVMX_IS_64BIT_HOST_MODE())
9792 {
9793 rc = VMXReadVmcs64(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u64Val);
9794 AssertRCBreak(rc);
9795 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
9796 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
9797 u32Val = u64Val; /* For pending debug exceptions checks below. */
9798 }
9799 else
9800 {
9801 rc = VMXReadVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u32Val);
9802 AssertRCBreak(rc);
9803 /* Bits 31:15, Bit 13, Bits 11:4 MBZ. */
9804 HMVMX_CHECK_BREAK(!(u32Val & 0xffffaff0), VMX_IGS_PENDING_DEBUG_RESERVED);
9805 }
9806
9807 if ( (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
9808 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS)
9809 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
9810 {
9811 if ( (u32Eflags & X86_EFL_TF)
9812 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
9813 {
9814 /* Bit 14 is PendingDebug.BS. */
9815 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
9816 }
9817 if ( !(u32Eflags & X86_EFL_TF)
9818 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
9819 {
9820 /* Bit 14 is PendingDebug.BS. */
9821 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
9822 }
9823 }
9824
9825 /* VMCS link pointer. */
9826 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
9827 AssertRCBreak(rc);
9828 if (u64Val != UINT64_C(0xffffffffffffffff))
9829 {
9830 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
9831 /** @todo Bits beyond the processor's physical-address width MBZ. */
9832 /** @todo The 32-bit value located in memory referenced by the value of this field (as a
9833 * physical address) must contain the processor's VMCS revision ID. */
9834 /** @todo SMM checks. */
9835 }
9836
9837 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
9838 * not using Nested Paging? */
9839 if ( pVM->hm.s.fNestedPaging
9840 && !fLongModeGuest
9841 && CPUMIsGuestInPAEModeEx(pCtx))
9842 {
9843 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
9844 AssertRCBreak(rc);
9845 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
9846
9847 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
9848 AssertRCBreak(rc);
9849 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
9850
9851 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
9852 AssertRCBreak(rc);
9853 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
9854
9855 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
9856 AssertRCBreak(rc);
9857 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
9858 }
9859
9860 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
9861 if (uError == VMX_IGS_ERROR)
9862 uError = VMX_IGS_REASON_NOT_FOUND;
9863 } while (0);
9864
9865 pVCpu->hm.s.u32HMError = uError;
9866 return uError;
9867
9868#undef HMVMX_ERROR_BREAK
9869#undef HMVMX_CHECK_BREAK
9870}
9871
9872/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9873/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
9874/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9875
9876/** @name VM-exit handlers.
9877 * @{
9878 */
9879
9880/**
9881 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
9882 */
9883HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9884{
9885 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9886 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
9887 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
9888 if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
9889 return VINF_SUCCESS;
9890 return VINF_EM_RAW_INTERRUPT;
9891}
9892
9893
9894/**
9895 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9896 */
9897HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9898{
9899 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9900 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
9901
9902 int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
9903 AssertRCReturn(rc, rc);
9904
9905 uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo);
9906 Assert( !(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT)
9907 && uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
9908 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
9909
9910 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
9911 {
9912 /*
9913 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we injected it ourselves and
9914 * anything we inject is not going to cause a VM-exit directly for the event being injected.
9915 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
9916 *
9917 * Dispatch the NMI to the host. See Intel spec. 27.5.5 "Updating Non-Register State".
9918 */
9919 VMXDispatchHostNmi();
9920 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
9921 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
9922 return VINF_SUCCESS;
9923 }
9924
9925 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
9926 rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
9927 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
9928 {
9929 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
9930 return VINF_SUCCESS;
9931 }
9932 if (RT_UNLIKELY(rc == VINF_EM_RESET))
9933 {
9934 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
9935 return rc;
9936 }
9937
9938 uint32_t uExitIntInfo = pVmxTransient->uExitIntInfo;
9939 uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntInfo);
9940 switch (uIntType)
9941 {
9942 case VMX_EXIT_INTERRUPTION_INFO_TYPE_PRIV_SW_XCPT: /* Privileged software exception. (#DB from ICEBP) */
9943 Assert(uVector == X86_XCPT_DB);
9944 /* no break */
9945 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT: /* Software exception. (#BP or #OF) */
9946 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_PRIV_SW_XCPT);
9947 /* no break */
9948 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT:
9949 {
9950 switch (uVector)
9951 {
9952 case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pMixedCtx, pVmxTransient); break;
9953 case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pMixedCtx, pVmxTransient); break;
9954 case X86_XCPT_NM: rc = hmR0VmxExitXcptNM(pVCpu, pMixedCtx, pVmxTransient); break;
9955 case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pMixedCtx, pVmxTransient); break;
9956 case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pMixedCtx, pVmxTransient); break;
9957 case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pMixedCtx, pVmxTransient); break;
9958#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
9959 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
9960 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
9961 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
9962 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
9963 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
9964 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
9965 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
9966 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
9967 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
9968 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
9969 case X86_XCPT_TS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestTS);
9970 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
9971#endif
9972 default:
9973 {
9974 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
9975 AssertRCReturn(rc, rc);
9976
9977 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
9978 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
9979 {
9980 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
9981 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
9982 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
9983
9984 rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
9985 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
9986 AssertRCReturn(rc, rc);
9987 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntInfo),
9988 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode,
9989 0 /* GCPtrFaultAddress */);
9990 AssertRCReturn(rc, rc);
9991 }
9992 else
9993 {
9994 AssertMsgFailed(("Unexpected VM-exit caused by exception %#x\n", uVector));
9995 pVCpu->hm.s.u32HMError = uVector;
9996 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
9997 }
9998 break;
9999 }
10000 }
10001 break;
10002 }
10003
10004 default:
10005 {
10006 pVCpu->hm.s.u32HMError = uExitIntInfo;
10007 rc = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
10008 AssertMsgFailed(("Unexpected interruption info %#x\n", VMX_EXIT_INTERRUPTION_INFO_TYPE(uExitIntInfo)));
10009 break;
10010 }
10011 }
10012 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
10013 return rc;
10014}
10015
10016
10017/**
10018 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
10019 */
10020HMVMX_EXIT_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10021{
10022 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10023
10024    /* The guest is now ready to receive interrupts; indicate that we no longer need interrupt-window VM-exits. */
10025 hmR0VmxClearIntWindowExitVmcs(pVCpu);
10026
10027 /* Deliver the pending interrupts via hmR0VmxEvaluatePendingEvent() and resume guest execution. */
10028 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
10029 return VINF_SUCCESS;
10030}
10031
10032
10033/**
10034 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
10035 */
10036HMVMX_EXIT_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10037{
10038 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10039 if (RT_UNLIKELY(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT)))
10040 {
10041 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
10042 HMVMX_RETURN_UNEXPECTED_EXIT();
10043 }
10044
10045 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS));
10046
10047 /*
10048 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
10049 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
10050 */
10051 uint32_t uIntrState = 0;
10052 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
10053 AssertRCReturn(rc, rc);
10054
10055 bool const fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
10056 if ( fBlockSti
10057 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
10058 {
10059 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
10060 }
10061
10062    /* The guest is now ready to receive NMIs; indicate that we no longer need NMI-window VM-exits. */
10063 hmR0VmxClearNmiWindowExitVmcs(pVCpu);
10064
10065 /* Deliver the pending NMI via hmR0VmxEvaluatePendingEvent() and resume guest execution. */
10066 return VINF_SUCCESS;
10067}
10068
10069
10070/**
10071 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10072 */
10073HMVMX_EXIT_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10074{
10075 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10076 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
10077 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10078}
10079
10080
10081/**
10082 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
10083 */
10084HMVMX_EXIT_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10085{
10086 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10087 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
10088 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10089}
10090
10091
10092/**
10093 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
10094 */
10095HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10096{
10097 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10098 PVM pVM = pVCpu->CTX_SUFF(pVM);
10099 int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10100 if (RT_LIKELY(rc == VINF_SUCCESS))
10101 {
10102 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10103 Assert(pVmxTransient->cbInstr == 2);
10104 }
10105 else
10106 {
10107 AssertMsgFailed(("hmR0VmxExitCpuid: EMInterpretCpuId failed with %Rrc\n", rc));
10108 rc = VERR_EM_INTERPRETER;
10109 }
10110 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
10111 return rc;
10112}
10113
10114
10115/**
10116 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
10117 */
10118HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10119{
10120 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10121 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
10122 AssertRCReturn(rc, rc);
10123
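      /* Editor's note: GETSEC raises #UD when CR4.SMXE is clear, so this VM-exit can only legitimately occur with
         SMXE set. SMX itself is not virtualized here, so the instruction is handed to the recompiler for emulation. */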
10124 if (pMixedCtx->cr4 & X86_CR4_SMXE)
10125 return VINF_EM_RAW_EMULATE_INSTR;
10126
10127 AssertMsgFailed(("hmR0VmxExitGetsec: unexpected VM-exit when CR4.SMXE is 0.\n"));
10128 HMVMX_RETURN_UNEXPECTED_EXIT();
10129}
10130
10131
10132/**
10133 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10134 */
10135HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10136{
10137 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10138 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
10139 AssertRCReturn(rc, rc);
10140
10141 PVM pVM = pVCpu->CTX_SUFF(pVM);
10142 rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10143 if (RT_LIKELY(rc == VINF_SUCCESS))
10144 {
10145 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10146 Assert(pVmxTransient->cbInstr == 2);
10147 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
10148 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
10149 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
10150 }
10151 else
10152 rc = VERR_EM_INTERPRETER;
10153 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
10154 return rc;
10155}
10156
10157
10158/**
10159 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10160 */
10161HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10162{
10163 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10164 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
10165 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); /* For MSR_K8_TSC_AUX */
10166 AssertRCReturn(rc, rc);
10167
10168 PVM pVM = pVCpu->CTX_SUFF(pVM);
10169 rc = EMInterpretRdtscp(pVM, pVCpu, pMixedCtx);
10170 if (RT_LIKELY(rc == VINF_SUCCESS))
10171 {
10172 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10173 Assert(pVmxTransient->cbInstr == 3);
10174 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
10175 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
10176 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
10177 }
10178 else
10179 {
10180 AssertMsgFailed(("hmR0VmxExitRdtscp: EMInterpretRdtscp failed with %Rrc\n", rc));
10181 rc = VERR_EM_INTERPRETER;
10182 }
10183 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
10184 return rc;
10185}
10186
10187
10188/**
10189 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10190 */
10191HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10192{
10193 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10194 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
10195 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); /** @todo review if CR0 is really required by EM. */
10196 AssertRCReturn(rc, rc);
10197
10198 PVM pVM = pVCpu->CTX_SUFF(pVM);
10199 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10200 if (RT_LIKELY(rc == VINF_SUCCESS))
10201 {
10202 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10203 Assert(pVmxTransient->cbInstr == 2);
10204 }
10205 else
10206 {
10207 AssertMsgFailed(("hmR0VmxExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
10208 rc = VERR_EM_INTERPRETER;
10209 }
10210 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
10211 return rc;
10212}
10213
10214
10215/**
10216 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
10217 */
10218HMVMX_EXIT_DECL hmR0VmxExitVmcall(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10219{
10220 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10221
10222 int rc = VERR_NOT_SUPPORTED;
10223 if (GIMAreHypercallsEnabled(pVCpu))
10224 {
10225 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10226 AssertRCReturn(rc, rc);
10227
10228 rc = GIMHypercall(pVCpu, pMixedCtx);
10229 }
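      /* Editor's note: if hypercalls are disabled or the GIM provider did not handle the call, a #UD is injected
         below, mirroring what VMCALL does on real hardware when executed outside VMX operation. */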
10230 if (rc != VINF_SUCCESS)
10231 {
10232 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
10233 rc = VINF_SUCCESS;
10234 }
10235
10236 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmcall);
10237 return rc;
10238}
10239
10240
10241/**
10242 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10243 */
10244HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10245{
10246 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10247 PVM pVM = pVCpu->CTX_SUFF(pVM);
10248 Assert(!pVM->hm.s.fNestedPaging);
10249
10250 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
10251 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
10252 AssertRCReturn(rc, rc);
10253
10254 VBOXSTRICTRC rc2 = EMInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), pVmxTransient->uExitQualification);
10255 rc = VBOXSTRICTRC_VAL(rc2);
10256 if (RT_LIKELY(rc == VINF_SUCCESS))
10257 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10258 else
10259 {
10260 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %#RX64 failed with %Rrc\n",
10261 pVmxTransient->uExitQualification, rc));
10262 }
10263 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
10264 return rc;
10265}
10266
10267
10268/**
10269 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10270 */
10271HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10272{
10273 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10274 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10275 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10276 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10277 AssertRCReturn(rc, rc);
10278
10279 PVM pVM = pVCpu->CTX_SUFF(pVM);
10280 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10281 if (RT_LIKELY(rc == VINF_SUCCESS))
10282 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10283 else
10284 {
10285 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
10286 rc = VERR_EM_INTERPRETER;
10287 }
10288 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
10289 return rc;
10290}
10291
10292
10293/**
10294 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10295 */
10296HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10297{
10298 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10299 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10300 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10301 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10302 AssertRCReturn(rc, rc);
10303
10304 PVM pVM = pVCpu->CTX_SUFF(pVM);
10305 VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10306 rc = VBOXSTRICTRC_VAL(rc2);
10307 if (RT_LIKELY( rc == VINF_SUCCESS
10308 || rc == VINF_EM_HALT))
10309 {
10310 int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10311 AssertRCReturn(rc3, rc3);
10312
10313 if ( rc == VINF_EM_HALT
10314 && EMMonitorWaitShouldContinue(pVCpu, pMixedCtx))
10315 {
10316 rc = VINF_SUCCESS;
10317 }
10318 }
10319 else
10320 {
10321 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
10322 rc = VERR_EM_INTERPRETER;
10323 }
10324 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
10325 ("hmR0VmxExitMwait: failed, invalid error code %Rrc\n", rc));
10326 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
10327 return rc;
10328}
10329
10330
10331/**
10332 * VM-exit handler for RSM (VMX_EXIT_RSM). Unconditional VM-exit.
10333 */
10334HMVMX_EXIT_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10335{
10336 /*
10337 * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root mode. In theory, we should never
10338 * get this VM-exit. This can happen only if dual-monitor treatment of SMI and VMX is enabled, which can (only?) be done by
10339 * executing VMCALL in VMX root operation. If we get here, something funny is going on.
10340 * See Intel spec. "33.15.5 Enabling the Dual-Monitor Treatment".
10341 */
10342 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10343 AssertMsgFailed(("Unexpected RSM VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10344 HMVMX_RETURN_UNEXPECTED_EXIT();
10345}
10346
10347
10348/**
10349 * VM-exit handler for SMI (VMX_EXIT_SMI). Unconditional VM-exit.
10350 */
10351HMVMX_EXIT_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10352{
10353 /*
10354 * This can only happen if we support dual-monitor treatment of SMI, which can be activated by executing VMCALL in VMX
10355 * root operation. Only an STM (SMM transfer monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL
10356 * in VMX root mode or receive an SMI. If we get here, something funny is going on.
10357 * See Intel spec. "33.15.6 Activating the Dual-Monitor Treatment" and Intel spec. 25.3 "Other Causes of VM-Exits"
10358 */
10359 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10360 AssertMsgFailed(("Unexpected SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10361 HMVMX_RETURN_UNEXPECTED_EXIT();
10362}
10363
10364
10365/**
10366 * VM-exit handler for IO SMI (VMX_EXIT_IO_SMI). Unconditional VM-exit.
10367 */
10368HMVMX_EXIT_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10369{
10370 /* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */
10371 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10372 AssertMsgFailed(("Unexpected IO SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10373 HMVMX_RETURN_UNEXPECTED_EXIT();
10374}
10375
10376
10377/**
10378 * VM-exit handler for SIPI (VMX_EXIT_SIPI). Conditional VM-exit.
10379 */
10380HMVMX_EXIT_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10381{
10382 /*
10383 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used. We currently
10384 * don't make use of it (see hmR0VmxLoadGuestActivityState()) as our guests don't have direct access to the host LAPIC.
10385 * See Intel spec. 25.3 "Other Causes of VM-exits".
10386 */
10387 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10388 AssertMsgFailed(("Unexpected SIPI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10389 HMVMX_RETURN_UNEXPECTED_EXIT();
10390}
10391
10392
10393/**
10394 * VM-exit handler for INIT signal (VMX_EXIT_INIT_SIGNAL). Unconditional
10395 * VM-exit.
10396 */
10397HMVMX_EXIT_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10398{
10399 /*
10400 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
10401     * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery" and Intel spec. 29.3 "VMX Instructions" for "VMXON".
10402 *
10403 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these VM-exits.
10404 * See Intel spec. "23.8 Restrictions on VMX operation".
10405 */
10406 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10407 return VINF_SUCCESS;
10408}
10409
10410
10411/**
10412 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
10413 * VM-exit.
10414 */
10415HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10416{
10417 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10418 return VINF_EM_RESET;
10419}
10420
10421
10422/**
10423 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10424 */
10425HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10426{
10427 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10428 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
10429 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
10430 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10431 AssertRCReturn(rc, rc);
10432
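      /* Editor's note: HLT is a single-byte opcode (0xF4), so skipping it only requires advancing RIP by one. */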
10433 pMixedCtx->rip++;
10434 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
10435 if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx)) /* Requires eflags. */
10436 rc = VINF_SUCCESS;
10437 else
10438 rc = VINF_EM_HALT;
10439
10440 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
10441 return rc;
10442}
10443
10444
10445/**
10446 * VM-exit handler for instructions that result in a #UD exception delivered to
10447 * the guest.
10448 */
10449HMVMX_EXIT_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10450{
10451 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10452 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
10453 return VINF_SUCCESS;
10454}
10455
10456
10457/**
10458 * VM-exit handler for expiry of the VMX preemption timer.
10459 */
10460HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10461{
10462 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10463
10464 /* If the preemption-timer has expired, reinitialize the preemption timer on next VM-entry. */
10465 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
10466
10467 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
10468 PVM pVM = pVCpu->CTX_SUFF(pVM);
10469 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
10470 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
10471 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
10472}
10473
10474
10475/**
10476 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
10477 */
10478HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10479{
10480 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10481
10482    /* We expose XSETBV to the guest; fall back to the recompiler for emulation. */
10483 /** @todo check if XSETBV is supported by the recompiler. */
10484 return VERR_EM_INTERPRETER;
10485}
10486
10487
10488/**
10489 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10490 */
10491HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10492{
10493 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10494
10495    /* The guest should not invalidate the host CPU's TLBs; fall back to the recompiler. */
10496 /** @todo implement EMInterpretInvpcid() */
10497 return VERR_EM_INTERPRETER;
10498}
10499
10500
10501/**
10502 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE).
10503 * Error VM-exit.
10504 */
10505HMVMX_EXIT_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10506{
10507 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10508 AssertRCReturn(rc, rc);
10509
10510 rc = hmR0VmxCheckVmcsCtls(pVCpu);
10511 AssertRCReturn(rc, rc);
10512
10513 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
10514 NOREF(uInvalidReason);
10515
10516#ifdef VBOX_STRICT
10517 uint32_t uIntrState;
10518 HMVMXHCUINTREG uHCReg;
10519 uint64_t u64Val;
10520 uint32_t u32Val;
10521
10522 rc = hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
10523 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
10524 rc |= hmR0VmxReadEntryInstrLenVmcs(pVmxTransient);
10525 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
10526 AssertRCReturn(rc, rc);
10527
10528 Log4(("uInvalidReason %u\n", uInvalidReason));
10529 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
10530 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
10531 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
10532 Log4(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", uIntrState));
10533
10534 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); AssertRC(rc);
10535 Log4(("VMX_VMCS_GUEST_CR0 %#RX32\n", u32Val));
10536 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
10537 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
10538 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
10539    Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RHr\n", uHCReg));
10540 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
10541 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
10542 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
10543 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
10544 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
10545 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
10546#else
10547 NOREF(pVmxTransient);
10548#endif
10549
10550 HMDumpRegs(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
10551 return VERR_VMX_INVALID_GUEST_STATE;
10552}
10553
10554
10555/**
10556 * VM-exit handler for VM-entry failure due to an MSR-load
10557 * (VMX_EXIT_ERR_MSR_LOAD). Error VM-exit.
10558 */
10559HMVMX_EXIT_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10560{
10561 NOREF(pVmxTransient);
10562 AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx);
10563 HMVMX_RETURN_UNEXPECTED_EXIT();
10564}
10565
10566
10567/**
10568 * VM-exit handler for VM-entry failure due to a machine-check event
10569 * (VMX_EXIT_ERR_MACHINE_CHECK). Error VM-exit.
10570 */
10571HMVMX_EXIT_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10572{
10573 NOREF(pVmxTransient);
10574 AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx);
10575 HMVMX_RETURN_UNEXPECTED_EXIT();
10576}
10577
10578
10579/**
10580 * VM-exit handler for all undefined reasons. Should never ever happen, in
10581 * theory.
10582 */
10583HMVMX_EXIT_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10584{
10585 AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason, pVCpu, pMixedCtx));
10586 NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient);
10587 return VERR_VMX_UNDEFINED_EXIT_CODE;
10588}
10589
10590
10591/**
10592 * VM-exit handler for XDTR (LGDT, SGDT, LIDT, SIDT) accesses
10593 * (VMX_EXIT_XDTR_ACCESS) and LDT and TR access (LLDT, LTR, SLDT, STR).
10594 * Conditional VM-exit.
10595 */
10596HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10597{
10598 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10599
10600 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT. */
10601 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess);
10602 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT)
10603 return VERR_EM_INTERPRETER;
10604 AssertMsgFailed(("Unexpected XDTR access. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10605 HMVMX_RETURN_UNEXPECTED_EXIT();
10606}
10607
10608
10609/**
10610 * VM-exit handler for RDRAND (VMX_EXIT_RDRAND). Conditional VM-exit.
10611 */
10612HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10613{
10614 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10615
10616 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT. */
10617 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdrand);
10618 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT)
10619 return VERR_EM_INTERPRETER;
10620 AssertMsgFailed(("Unexpected RDRAND exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10621 HMVMX_RETURN_UNEXPECTED_EXIT();
10622}
10623
10624
10625/**
10626 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10627 */
10628HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10629{
10630 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10631
10632    /* EMInterpretRdmsr() requires CR0, EFLAGS and the SS segment register. */
10633 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10634 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10635 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10636 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
10637 {
10638 rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
10639 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
10640 }
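      /* Editor's note: without MSR bitmaps every RDMSR causes a VM-exit, so the MSR being read may be one of the
         lazily loaded or auto-load/store guest MSRs; the extra saves above keep the interpreter's view current. */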
10641 AssertRCReturn(rc, rc);
10642 Log4(("ecx=%#RX32\n", pMixedCtx->ecx));
10643
10644#ifdef VBOX_STRICT
10645 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
10646 {
10647 if ( hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx)
10648 && pMixedCtx->ecx != MSR_K6_EFER)
10649 {
10650 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
10651 HMVMX_RETURN_UNEXPECTED_EXIT();
10652 }
10653# if HC_ARCH_BITS == 64
10654 if ( pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests
10655 && hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
10656 {
10657 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
10658 HMVMX_RETURN_UNEXPECTED_EXIT();
10659 }
10660# endif
10661 }
10662#endif
10663
10664 PVM pVM = pVCpu->CTX_SUFF(pVM);
10665 rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10666 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER,
10667 ("hmR0VmxExitRdmsr: failed, invalid error code %Rrc\n", rc));
10668 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
10669 if (RT_LIKELY(rc == VINF_SUCCESS))
10670 {
10671 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10672 Assert(pVmxTransient->cbInstr == 2);
10673 }
10674 return rc;
10675}
10676
10677
10678/**
10679 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10680 */
10681HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10682{
10683 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10684 PVM pVM = pVCpu->CTX_SUFF(pVM);
10685 int rc = VINF_SUCCESS;
10686
10687 /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. */
10688 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10689 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10690 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10691 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
10692 {
10693 rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
10694 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
10695 }
10696 AssertRCReturn(rc, rc);
10697 Log4(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", pMixedCtx->ecx, pMixedCtx->edx, pMixedCtx->eax));
10698
10699 rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10700 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0VmxExitWrmsr: failed, invalid error code %Rrc\n", rc));
10701 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
10702
10703 if (RT_LIKELY(rc == VINF_SUCCESS))
10704 {
10705 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10706
10707 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
10708 if ( pMixedCtx->ecx >= MSR_IA32_X2APIC_START
10709 && pMixedCtx->ecx <= MSR_IA32_X2APIC_END)
10710 {
10711 /* We've already saved the APIC related guest-state (TPR) in hmR0VmxPostRunGuest(). When full APIC register
10712 * virtualization is implemented we'll have to make sure APIC state is saved from the VMCS before
10713             *  EMInterpretWrmsr() changes it. */
10714 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
10715 }
10716 else if (pMixedCtx->ecx == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
10717 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
10718 else if (pMixedCtx->ecx == MSR_K6_EFER)
10719 {
10720 /*
10721 * If the guest touches EFER we need to update the VM-Entry and VM-Exit controls as well,
10722 * even if it is -not- touching bits that cause paging mode changes (LMA/LME). We care about
10723 * the other bits as well, SCE and NXE. See @bugref{7368}.
10724 */
10725 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_CTLS | HM_CHANGED_VMX_EXIT_CTLS);
10726 }
10727
10728 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not supported. */
10729 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
10730 {
10731 switch (pMixedCtx->ecx)
10732 {
10733 case MSR_IA32_SYSENTER_CS: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
10734 case MSR_IA32_SYSENTER_EIP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
10735 case MSR_IA32_SYSENTER_ESP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
10736 case MSR_K8_FS_BASE: /* no break */
10737 case MSR_K8_GS_BASE: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS); break;
10738 case MSR_K6_EFER: /* already handled above */ break;
10739 default:
10740 {
10741 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
10742 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
10743#if HC_ARCH_BITS == 64
10744 else if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
10745 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
10746#endif
10747 break;
10748 }
10749 }
10750 }
10751#ifdef VBOX_STRICT
10752 else
10753 {
10754 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
10755 switch (pMixedCtx->ecx)
10756 {
10757 case MSR_IA32_SYSENTER_CS:
10758 case MSR_IA32_SYSENTER_EIP:
10759 case MSR_IA32_SYSENTER_ESP:
10760 case MSR_K8_FS_BASE:
10761 case MSR_K8_GS_BASE:
10762 {
10763 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
10764 HMVMX_RETURN_UNEXPECTED_EXIT();
10765 }
10766
10767            /* Writes to MSRs in the auto-load/store area or to lazily swapped MSRs shouldn't cause VM-exits when MSR-bitmaps are used. */
10768 default:
10769 {
10770 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
10771 {
10772 /* EFER writes are always intercepted, see hmR0VmxLoadGuestMsrs(). */
10773 if (pMixedCtx->ecx != MSR_K6_EFER)
10774 {
10775 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
10776 pMixedCtx->ecx));
10777 HMVMX_RETURN_UNEXPECTED_EXIT();
10778 }
10779 }
10780
10781#if HC_ARCH_BITS == 64
10782 if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
10783 {
10784 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
10785 HMVMX_RETURN_UNEXPECTED_EXIT();
10786 }
10787#endif
10788 break;
10789 }
10790 }
10791 }
10792#endif /* VBOX_STRICT */
10793 }
10794 return rc;
10795}
10796
10797
10798/**
10799 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10800 */
10801HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10802{
10803 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10804
10805 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT. */
10806 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);
10807 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT)
10808 return VERR_EM_INTERPRETER;
10809 AssertMsgFailed(("Unexpected PAUSE exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10810 HMVMX_RETURN_UNEXPECTED_EXIT();
10811}
10812
10813
10814/**
10815 * VM-exit handler for when the TPR value is lowered below the specified
10816 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10817 */
10818HMVMX_EXIT_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10819{
10820 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10821 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW);
10822
10823 /*
10824     * The TPR has already been updated, see hmR0VmxPostRunGuest(). RIP is also updated as part of the VM-exit by VT-x. Update
10825 * the threshold in the VMCS, deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectPendingEvent() and
10826 * resume guest execution.
10827 */
10828 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
10829 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
10830 return VINF_SUCCESS;
10831}
10832
10833
10834/**
10835 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
10836 * VM-exit.
10837 *
10838 * @retval VINF_SUCCESS when guest execution can continue.
10839 * @retval VINF_PGM_CHANGE_MODE when shadow paging mode changed, back to ring-3.
10840 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
10841 * @retval VERR_EM_INTERPRETER when something unexpected happened, fallback to
10842 * recompiler.
10843 */
10844HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10845{
10846 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10847 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
10848 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
10849 AssertRCReturn(rc, rc);
10850
10851 RTGCUINTPTR const uExitQualification = pVmxTransient->uExitQualification;
10852 uint32_t const uAccessType = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification);
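      /* Editor's note: the access type in the exit qualification is 0 = MOV to CRx, 1 = MOV from CRx, 2 = CLTS,
         3 = LMSW, matching the cases handled in the switch below. */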
10853 PVM pVM = pVCpu->CTX_SUFF(pVM);
10854 switch (uAccessType)
10855 {
10856 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE: /* MOV to CRx */
10857 {
10858#if 0
10859            /* EMInterpretCRxWrite() references a lot of guest state (EFER, RFLAGS, segment registers, etc.). Sync the entire state. */
10860 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10861#else
10862 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
10863 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
10864 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10865#endif
10866 AssertRCReturn(rc, rc);
10867
10868 rc = EMInterpretCRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
10869 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification),
10870 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification));
10871 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
10872
10873 switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification))
10874 {
10875 case 0: /* CR0 */
10876 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
10877 Log4(("CRX CR0 write rc=%d CR0=%#RX64\n", rc, pMixedCtx->cr0));
10878 break;
10879 case 2: /* CR2 */
10880                    /* Nothing to do here; CR2 is not part of the VMCS. */
10881 break;
10882 case 3: /* CR3 */
10883 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx));
10884 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR3);
10885 Log4(("CRX CR3 write rc=%d CR3=%#RX64\n", rc, pMixedCtx->cr3));
10886 break;
10887 case 4: /* CR4 */
10888 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
10889 Log4(("CRX CR4 write rc=%d CR4=%#RX64\n", rc, pMixedCtx->cr4));
10890 break;
10891 case 8: /* CR8 */
10892 Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
10893 /* CR8 contains the APIC TPR. Was updated by EMInterpretCRxWrite(). */
10894 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
10895 break;
10896 default:
10897 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)));
10898 break;
10899 }
10900
10901 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
10902 break;
10903 }
10904
10905 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ: /* MOV from CRx */
10906 {
10907 /* EMInterpretCRxRead() requires EFER MSR, CS. */
10908 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10909 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
10910 AssertRCReturn(rc, rc);
10911 Assert( !pVM->hm.s.fNestedPaging
10912 || !CPUMIsGuestPagingEnabledEx(pMixedCtx)
10913 || VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 3);
10914
10915 /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
10916 Assert( VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 8
10917 || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
10918
10919 rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
10920 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification),
10921 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification));
10922 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
10923 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
10924 Log4(("CRX CR%d Read access rc=%d\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification), rc));
10925 break;
10926 }
10927
10928 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */
10929 {
10930 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10931 AssertRCReturn(rc, rc);
10932 rc = EMInterpretCLTS(pVM, pVCpu);
10933 AssertRCReturn(rc, rc);
10934 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
10935 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
10936 Log4(("CRX CLTS write rc=%d\n", rc));
10937 break;
10938 }
10939
10940 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10941 {
10942 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10943 AssertRCReturn(rc, rc);
10944 rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
10945 if (RT_LIKELY(rc == VINF_SUCCESS))
10946 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
10947 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
10948 Log4(("CRX LMSW write rc=%d\n", rc));
10949 break;
10950 }
10951
10952 default:
10953 {
10954 AssertMsgFailed(("Invalid access-type in Mov CRx VM-exit qualification %#x\n", uAccessType));
10955 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
10956 }
10957 }
10958
10959 /* Validate possible error codes. */
10960 Assert(rc == VINF_SUCCESS || rc == VINF_PGM_CHANGE_MODE || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_SYNC_CR3
10961 || rc == VERR_VMX_UNEXPECTED_EXCEPTION);
10962 if (RT_SUCCESS(rc))
10963 {
10964 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10965 AssertRCReturn(rc2, rc2);
10966 }
10967
10968 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
10969 return rc;
10970}
10971
10972
10973/**
10974 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
10975 * VM-exit.
10976 */
10977HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10978{
10979 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10980 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
10981
10982 int rc2 = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
10983 rc2 |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
10984 rc2 |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
10985 rc2 |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* Eflag checks in EMInterpretDisasCurrent(). */
10986 rc2 |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
10987 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* SELM checks in EMInterpretDisasCurrent(). */
10988 /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
10989 AssertRCReturn(rc2, rc2);
10990
10991    /* See Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
10992 uint32_t uIOPort = VMX_EXIT_QUALIFICATION_IO_PORT(pVmxTransient->uExitQualification);
10993 uint8_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(pVmxTransient->uExitQualification);
10994 bool fIOWrite = ( VMX_EXIT_QUALIFICATION_IO_DIRECTION(pVmxTransient->uExitQualification)
10995 == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT);
10996 bool fIOString = VMX_EXIT_QUALIFICATION_IO_IS_STRING(pVmxTransient->uExitQualification);
10997 bool fStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
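      /* Editor's note: fStepping means the guest has RFLAGS.TF set and expects a single-step #DB after this
         instruction; since the I/O is emulated here, that #DB is raised manually once emulation succeeds (see below). */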
10998 AssertReturn(uIOWidth <= 3 && uIOWidth != 2, VERR_VMX_IPE_1);
10999
11000 /* I/O operation lookup arrays. */
11001 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses. */
11002 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving the result (in AL/AX/EAX). */
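      /* Editor's note: the exit-qualification width field encodes 0 = 1 byte, 1 = 2 bytes and 3 = 4 bytes (2 is
         undefined, hence the zero placeholders above). E.g. a 1-byte IN gives cbValue = 1 and an AND mask of 0xff,
         so only AL is updated. */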
11003
11004 VBOXSTRICTRC rcStrict;
11005 uint32_t const cbValue = s_aIOSizes[uIOWidth];
11006 uint32_t const cbInstr = pVmxTransient->cbInstr;
11007 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
11008 PVM pVM = pVCpu->CTX_SUFF(pVM);
11009 if (fIOString)
11010 {
11011#if 0 /* Not yet ready. IEM gurus with debian 32-bit guest without NP (on ATA reads). See @bugref{5752#c158} */
11012 /*
11013 * INS/OUTS - I/O String instruction.
11014 *
11015 * Use instruction-information if available, otherwise fall back on
11016 * interpreting the instruction.
11017 */
11018 Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
11019 AssertReturn(pMixedCtx->dx == uIOPort, VERR_VMX_IPE_2);
11020 if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.Msrs.u64BasicInfo))
11021 {
11022 rc2 = hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
11023 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
11024 rc2 |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11025 AssertRCReturn(rc2, rc2);
11026 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
11027 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
11028 IEMMODE enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
11029 bool fRep = VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification);
11030 if (fIOWrite)
11031 {
11032 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
11033 pVmxTransient->ExitInstrInfo.StrIo.iSegReg);
11034 }
11035 else
11036 {
11037 /*
11038 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
11039 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
11040 * See Intel Instruction spec. for "INS".
11041 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
11042 */
11043 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr);
11044 }
11045 }
11046 else
11047 {
11048 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
11049 rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11050 AssertRCReturn(rc2, rc2);
11051 rcStrict = IEMExecOne(pVCpu);
11052 }
11053 /** @todo IEM needs to be setting these flags somehow. */
11054 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
11055 fUpdateRipAlready = true;
11056#else
11057 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
11058 rcStrict = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL /* pcbInstr */);
11059 if (RT_SUCCESS(rcStrict))
11060 {
11061 if (fIOWrite)
11062 {
11063 rcStrict = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
11064 (DISCPUMODE)pDis->uAddrMode, cbValue);
11065 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
11066 }
11067 else
11068 {
11069 rcStrict = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
11070 (DISCPUMODE)pDis->uAddrMode, cbValue);
11071 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
11072 }
11073 }
11074 else
11075 {
11076 AssertMsg(rcStrict == VERR_EM_INTERPRETER, ("rcStrict=%Rrc RIP %#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->rip));
11077 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
11078 }
11079#endif
11080 }
11081 else
11082 {
11083 /*
11084 * IN/OUT - I/O instruction.
11085 */
11086 Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
11087 uint32_t const uAndVal = s_aIOOpAnd[uIOWidth];
11088 Assert(!VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification));
11089 if (fIOWrite)
11090 {
11091 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbValue);
11092 if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
11093 HMR0SavePendingIOPortWrite(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
11094 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
11095 }
11096 else
11097 {
11098 uint32_t u32Result = 0;
11099 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
11100 if (IOM_SUCCESS(rcStrict))
11101 {
11102 /* Save result of I/O IN instr. in AL/AX/EAX. */
11103 pMixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);
11104 }
11105 else if (rcStrict == VINF_IOM_R3_IOPORT_READ)
11106 HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
11107 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
11108 }
11109 }
11110
11111 if (IOM_SUCCESS(rcStrict))
11112 {
11113 if (!fUpdateRipAlready)
11114 {
11115 pMixedCtx->rip += cbInstr;
11116 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
11117 }
11118
11119 /*
11120         * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault guru meditation while booting a Fedora 17 64-bit guest.
11121 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
11122 */
11123 if (fIOString)
11124 {
11125 /** @todo Single-step for INS/OUTS with REP prefix? */
11126 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
11127 }
11128 else if (fStepping)
11129 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
11130
11131 /*
11132 * If any I/O breakpoints are armed, we need to check if one triggered
11133 * and take appropriate action.
11134 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
11135 */
11136 rc2 = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
11137 AssertRCReturn(rc2, rc2);
11138
11139 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
11140 * execution engines about whether hyper BPs and such are pending. */
11141 uint32_t const uDr7 = pMixedCtx->dr[7];
11142 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
11143 && X86_DR7_ANY_RW_IO(uDr7)
11144 && (pMixedCtx->cr4 & X86_CR4_DE))
11145 || DBGFBpIsHwIoArmed(pVM)))
11146 {
11147 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
11148
11149 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
11150 VMMRZCallRing3Disable(pVCpu);
11151 HM_DISABLE_PREEMPT_IF_NEEDED();
11152
11153 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
11154
11155 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pMixedCtx, uIOPort, cbValue);
11156 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
11157 {
11158 /* Raise #DB. */
11159 if (fIsGuestDbgActive)
11160 ASMSetDR6(pMixedCtx->dr[6]);
11161 if (pMixedCtx->dr[7] != uDr7)
11162 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
11163
11164 hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
11165 }
11166 /* rcStrict is VINF_SUCCESS or in [VINF_EM_FIRST..VINF_EM_LAST]. */
11167 else if ( rcStrict2 != VINF_SUCCESS
11168 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
11169 rcStrict = rcStrict2;
11170
11171 HM_RESTORE_PREEMPT_IF_NEEDED();
11172 VMMRZCallRing3Enable(pVCpu);
11173 }
11174 }
11175
11176#ifdef DEBUG
11177 if (rcStrict == VINF_IOM_R3_IOPORT_READ)
11178 Assert(!fIOWrite);
11179 else if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
11180 Assert(fIOWrite);
11181 else
11182 {
11183 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
11184 * statuses, that the VMM device and some others may return. See
11185 * IOM_SUCCESS() for guidance. */
11186 AssertMsg( RT_FAILURE(rcStrict)
11187 || rcStrict == VINF_SUCCESS
11188 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
11189 || rcStrict == VINF_EM_DBG_BREAKPOINT
11190 || rcStrict == VINF_EM_RAW_GUEST_TRAP
11191 || rcStrict == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
11192 }
11193#endif
11194
11195 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
11196 return VBOXSTRICTRC_TODO(rcStrict);
11197}
11198
11199
11200/**
11201 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
11202 * VM-exit.
11203 */
11204HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11205{
11206 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11207
11208    /* Check if this task-switch occurred while delivering an event through the guest IDT. */
11209 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11210 AssertRCReturn(rc, rc);
11211 if (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
11212 {
11213 rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
11214 AssertRCReturn(rc, rc);
11215 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
11216 {
11217 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
11218
11219 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
11220 bool fErrorCodeValid = VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo);
11221
11222 /* Save it as a pending event and it'll be converted to a TRPM event on the way out to ring-3. */
11223 Assert(!pVCpu->hm.s.Event.fPending);
11224 pVCpu->hm.s.Event.fPending = true;
11225 pVCpu->hm.s.Event.u64IntInfo = pVmxTransient->uIdtVectoringInfo;
11226 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
11227 AssertRCReturn(rc, rc);
11228 if (fErrorCodeValid)
11229 pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
11230 else
11231 pVCpu->hm.s.Event.u32ErrCode = 0;
11232 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
11233 && uVector == X86_XCPT_PF)
11234 {
11235 pVCpu->hm.s.Event.GCPtrFaultAddress = pMixedCtx->cr2;
11236 }
11237
11238 Log4(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector));
11239 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
11240 return VINF_EM_RAW_INJECT_TRPM_EVENT;
11241 }
11242 }
11243
11244 /** @todo Emulate task switch someday, currently just going back to ring-3 for
11245 * emulation. */
11246 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
11247 return VERR_EM_INTERPRETER;
11248}
11249
11250
11251/**
11252 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
11253 */
11254HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11255{
11256 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11257 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG);
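      /* Editor's note: the monitor-trap flag causes a VM-exit after every guest instruction; clearing it below
         ensures only a single step is taken, and the completed step is reported via VINF_EM_DBG_STEPPED. */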
11258 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
11259 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
11260 AssertRCReturn(rc, rc);
11261 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
11262 return VINF_EM_DBG_STEPPED;
11263}
11264
11265
11266/**
11267 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
11268 */
11269HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11270{
11271 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11272
11273 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
11274 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
11275 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
11276 return VINF_SUCCESS;
11277 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
11278 return rc;
11279
11280#if 0
11281 /** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now
11282 * just sync the whole thing. */
11283 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11284#else
11285 /* Aggressive state sync. for now. */
11286 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
11287 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11288 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11289#endif
11290 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11291 AssertRCReturn(rc, rc);
11292
11293    /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses". */
11294 uint32_t uAccessType = VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
11295 switch (uAccessType)
11296 {
11297 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
11298 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
11299 {
11300 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
11301 && VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) == 0x80)
11302 {
11303 AssertMsgFailed(("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
11304 }
11305
11306 RTGCPHYS GCPhys = pMixedCtx->msrApicBase; /* Always up-to-date, msrApicBase is not part of the VMCS. */
11307 GCPhys &= PAGE_BASE_GC_MASK;
11308 GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
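            /* Editor's note: GCPhys now holds the guest-physical address of the access: the page-aligned APIC base
               (flag bits in the low bits of the APIC base MSR are masked off) plus the page offset reported in the
               exit qualification. */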
11309 PVM pVM = pVCpu->CTX_SUFF(pVM);
11310            Log4(("ApicAccess uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
11311 VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
11312
11313 VBOXSTRICTRC rc2 = IOMMMIOPhysHandler(pVM, pVCpu,
11314 (uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ) ? 0 : X86_TRAP_PF_RW,
11315 CPUMCTX2CORE(pMixedCtx), GCPhys);
11316 rc = VBOXSTRICTRC_VAL(rc2);
11317 Log4(("ApicAccess rc=%d\n", rc));
11318 if ( rc == VINF_SUCCESS
11319 || rc == VERR_PAGE_TABLE_NOT_PRESENT
11320 || rc == VERR_PAGE_NOT_PRESENT)
11321 {
11322 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11323 | HM_CHANGED_GUEST_RSP
11324 | HM_CHANGED_GUEST_RFLAGS
11325 | HM_CHANGED_VMX_GUEST_APIC_STATE);
11326 rc = VINF_SUCCESS;
11327 }
11328 break;
11329 }
11330
11331 default:
11332 Log4(("ApicAccess uAccessType=%#x\n", uAccessType));
11333 rc = VINF_EM_RAW_EMULATE_INSTR;
11334 break;
11335 }
11336
11337 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
11338 if (rc != VINF_SUCCESS)
11339 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccessToR3);
11340 return rc;
11341}
11342
11343
11344/**
11345 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
11346 * VM-exit.
11347 */
11348HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11349{
11350 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11351
11352 /* We should -not- get this VM-exit if the guest's debug registers were active. */
11353 if (pVmxTransient->fWasGuestDebugStateActive)
11354 {
11355 AssertMsgFailed(("Unexpected MOV DRx exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11356 HMVMX_RETURN_UNEXPECTED_EXIT();
11357 }
11358
11359 int rc = VERR_INTERNAL_ERROR_5;
11360 if ( !DBGFIsStepping(pVCpu)
11361 && !pVCpu->hm.s.fSingleInstruction
11362 && !pVmxTransient->fWasHyperDebugStateActive)
11363 {
11364 /* Don't intercept MOV DRx and #DB any more. */
11365 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
11366 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
11367 AssertRCReturn(rc, rc);
11368
11369 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
11370 {
11371#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
11372 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
11373 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
11374 AssertRCReturn(rc, rc);
11375#endif
11376 }
11377
11378 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
11379 VMMRZCallRing3Disable(pVCpu);
11380 HM_DISABLE_PREEMPT_IF_NEEDED();
11381
11382 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
11383 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
11384 Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
11385
11386 HM_RESTORE_PREEMPT_IF_NEEDED();
11387 VMMRZCallRing3Enable(pVCpu);
11388
11389#ifdef VBOX_WITH_STATISTICS
11390 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11391 AssertRCReturn(rc, rc);
11392 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
11393 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
11394 else
11395 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
11396#endif
11397 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
11398 return VINF_SUCCESS;
11399 }
11400
11401 /*
11402 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER, CS. EFER is always up-to-date.
11403 * Update the segment registers and DR7 from the CPU.
11404 */
11405 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11406 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11407 rc |= hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
11408 AssertRCReturn(rc, rc);
11409 Log4(("CS:RIP=%04x:%08RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
11410
11411 PVM pVM = pVCpu->CTX_SUFF(pVM);
11412 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
11413 {
11414 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
11415 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification),
11416 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification));
11417 if (RT_SUCCESS(rc))
11418 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
11419 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
11420 }
11421 else
11422 {
11423 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
11424 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification),
11425 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification));
11426 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
11427 }
11428
11429 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
11430 if (RT_SUCCESS(rc))
11431 {
11432 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11433 AssertRCReturn(rc2, rc2);
11434 }
11435 return rc;
11436}
11437
11438
11439/**
11440 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
11441 * Conditional VM-exit.
11442 */
11443HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11444{
11445 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11446 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
11447
11448 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
11449 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
11450 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
11451 return VINF_SUCCESS;
11452 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
11453 return rc;
11454
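    /* MMIO ranges are typically backed by deliberately misconfigured EPT entries, so an EPT-misconfiguration
       exit effectively signals an MMIO access; fetch the guest-physical address and let PGM/IOM handle it. */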
11455 RTGCPHYS GCPhys = 0;
11456 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
11457
11458#if 0
11459 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
11460#else
11461 /* Aggressive state sync. for now. */
11462 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
11463 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11464 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11465#endif
11466 AssertRCReturn(rc, rc);
11467
11468    /*
11469     * If we succeed, resume guest execution.
11470     * If we fail to interpret the instruction because we couldn't get the guest-physical address of the page
11471     * containing the instruction via the guest's page tables, we invalidate the guest page in the host TLB and
11472     * resume execution; the resulting guest page fault lets the guest handle this weird case itself.
11473     * See @bugref{6043}.
11474     */
11475 PVM pVM = pVCpu->CTX_SUFF(pVM);
11476 VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
11477 rc = VBOXSTRICTRC_VAL(rc2);
11478    Log4(("EPT misconfig at %#RGp RIP=%#RX64 rc=%d\n", GCPhys, pMixedCtx->rip, rc));
11479 if ( rc == VINF_SUCCESS
11480 || rc == VERR_PAGE_TABLE_NOT_PRESENT
11481 || rc == VERR_PAGE_NOT_PRESENT)
11482 {
11483 /* Successfully handled MMIO operation. */
11484 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11485 | HM_CHANGED_GUEST_RSP
11486 | HM_CHANGED_GUEST_RFLAGS
11487 | HM_CHANGED_VMX_GUEST_APIC_STATE);
11488 rc = VINF_SUCCESS;
11489 }
11490 return rc;
11491}
11492
11493
11494/**
11495 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
11496 * VM-exit.
11497 */
11498HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11499{
11500 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11501 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
11502
11503 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
11504 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
11505 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
11506 return VINF_SUCCESS;
11507 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
11508 return rc;
11509
11510 RTGCPHYS GCPhys = 0;
11511 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
11512 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11513#if 0
11514 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
11515#else
11516 /* Aggressive state sync. for now. */
11517 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
11518 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11519 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11520#endif
11521 AssertRCReturn(rc, rc);
11522
11523 /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
11524 AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQualification));
11525
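    /* Translate the EPT-violation exit qualification into a #PF-style error code for PGM: instruction
       fetch -> ID, write access -> RW, present translation -> P. */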
11526 RTGCUINT uErrorCode = 0;
11527 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH)
11528 uErrorCode |= X86_TRAP_PF_ID;
11529 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE)
11530 uErrorCode |= X86_TRAP_PF_RW;
11531 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
11532 uErrorCode |= X86_TRAP_PF_P;
11533
11534 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
11535
11536 Log4(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQualification, GCPhys,
11537 uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
11538
11539 /* Handle the pagefault trap for the nested shadow table. */
11540 PVM pVM = pVCpu->CTX_SUFF(pVM);
11541 rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pMixedCtx), GCPhys);
11542 TRPMResetTrap(pVCpu);
11543
11544 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
11545 if ( rc == VINF_SUCCESS
11546 || rc == VERR_PAGE_TABLE_NOT_PRESENT
11547 || rc == VERR_PAGE_NOT_PRESENT)
11548 {
11549 /* Successfully synced our nested page tables. */
11550 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
11551 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11552 | HM_CHANGED_GUEST_RSP
11553 | HM_CHANGED_GUEST_RFLAGS);
11554 return VINF_SUCCESS;
11555 }
11556
11557 Log4(("EPT return to ring-3 rc=%Rrc\n", rc));
11558 return rc;
11559}
11560
11561/** @} */
11562
11563/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
11564/* -=-=-=-=-=-=-=-=-=- VM-exit Exception Handlers -=-=-=-=-=-=-=-=-=-=- */
11565/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
11566
11567/** @name VM-exit exception handlers.
11568 * @{
11569 */
11570
11571/**
11572 * VM-exit exception handler for #MF (Math Fault: floating point exception).
11573 */
11574static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11575{
11576 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11577 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
11578
11579 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
11580 AssertRCReturn(rc, rc);
11581
11582 if (!(pMixedCtx->cr0 & X86_CR0_NE))
11583 {
11584 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
11585 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
11586
11587 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
11588         * provides VM-exit instruction length. If this causes problems later,
11589 * disassemble the instruction like it's done on AMD-V. */
11590 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11591 AssertRCReturn(rc2, rc2);
11592 return rc;
11593 }
11594
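    /* CR0.NE is set: the guest uses native (internal) FPU error reporting, so reflect the #MF back to it. */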
11595 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11596 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11597 return rc;
11598}
11599
11600
11601/**
11602 * VM-exit exception handler for #BP (Breakpoint exception).
11603 */
11604static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11605{
11606 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11607 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
11608
11609    /** @todo Try to optimize this by not saving the entire guest state unless
11610 * really needed. */
11611 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11612 AssertRCReturn(rc, rc);
11613
11614 PVM pVM = pVCpu->CTX_SUFF(pVM);
11615 rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
11616 if (rc == VINF_EM_RAW_GUEST_TRAP)
11617 {
11618 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11619 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11620 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11621 AssertRCReturn(rc, rc);
11622
11623 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11624 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11625 }
11626
11627 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
11628 return rc;
11629}
11630
11631
11632/**
11633 * VM-exit exception handler for #DB (Debug exception).
11634 */
11635static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11636{
11637 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11638 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
11639 Log6(("XcptDB\n"));
11640
11641 /*
11642     * Get the DR6-like values from the VM-exit qualification and pass them to DBGF
11643 * for processing.
11644 */
11645 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11646 AssertRCReturn(rc, rc);
11647
11648 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
11649 uint64_t uDR6 = X86_DR6_INIT_VAL;
11650 uDR6 |= ( pVmxTransient->uExitQualification
11651 & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
11652
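    /* Note: a #DB that causes a VM-exit does not update the guest's DR6; the pending B0-B3/BD/BS bits are
       reported in the exit qualification instead, which is why they are merged into DR6 further down. */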
11653 rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6, pVCpu->hm.s.fSingleInstruction);
11654 if (rc == VINF_EM_RAW_GUEST_TRAP)
11655 {
11656 /*
11657 * The exception was for the guest. Update DR6, DR7.GD and
11658 * IA32_DEBUGCTL.LBR before forwarding it.
11659 * (See Intel spec. 27.1 "Architectural State before a VM-Exit".)
11660 */
11661 VMMRZCallRing3Disable(pVCpu);
11662 HM_DISABLE_PREEMPT_IF_NEEDED();
11663
11664 pMixedCtx->dr[6] &= ~X86_DR6_B_MASK;
11665 pMixedCtx->dr[6] |= uDR6;
11666 if (CPUMIsGuestDebugStateActive(pVCpu))
11667 ASMSetDR6(pMixedCtx->dr[6]);
11668
11669 HM_RESTORE_PREEMPT_IF_NEEDED();
11670 VMMRZCallRing3Enable(pVCpu);
11671
11672 rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
11673 AssertRCReturn(rc, rc);
11674
11675 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
11676 pMixedCtx->dr[7] &= ~X86_DR7_GD;
11677
11678 /* Paranoia. */
11679 pMixedCtx->dr[7] &= ~X86_DR7_RAZ_MASK;
11680 pMixedCtx->dr[7] |= X86_DR7_RA1_MASK;
11681
11682 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pMixedCtx->dr[7]);
11683 AssertRCReturn(rc, rc);
11684
11685 /*
11686 * Raise #DB in the guest.
11687 *
11688 * It is important to reflect what the VM-exit gave us (preserving the interruption-type) rather than use
11689 * hmR0VmxSetPendingXcptDB() as the #DB could've been raised while executing ICEBP and not the 'normal' #DB.
11690 * Thus it -may- trigger different handling in the CPU (like skipped DPL checks). See @bugref{6398}.
11691 *
11692 * Since ICEBP isn't documented on Intel, see AMD spec. 15.20 "Event Injection".
11693 */
11694 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11695 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11696 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11697 AssertRCReturn(rc, rc);
11698 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11699 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11700 return VINF_SUCCESS;
11701 }
11702
11703 /*
11704 * Not a guest trap, must be a hypervisor related debug event then.
11705 * Update DR6 in case someone is interested in it.
11706 */
11707 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
11708 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
11709 CPUMSetHyperDR6(pVCpu, uDR6);
11710
11711 return rc;
11712}
11713
11714
11715/**
11716 * VM-exit exception handler for #NM (Device-not-available exception: floating
11717 * point exception).
11718 */
11719static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11720{
11721 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11722
11723 /* We require CR0 and EFER. EFER is always up-to-date. */
11724 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
11725 AssertRCReturn(rc, rc);
11726
11727    /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
11728 VMMRZCallRing3Disable(pVCpu);
11729 HM_DISABLE_PREEMPT_IF_NEEDED();
11730
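    /* Lazy FPU loading: while the guest's FPU state is not on the CPU, #NM is intercepted so the state can be
       loaded on first use (see CPUMR0Trap07Handler below); if it was already loaded, the fault is the guest's own. */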
11731 /* If the guest FPU was active at the time of the #NM exit, then it's a guest fault. */
11732 if (pVmxTransient->fWasGuestFPUStateActive)
11733 {
11734 rc = VINF_EM_RAW_GUEST_TRAP;
11735 Assert(CPUMIsGuestFPUStateActive(pVCpu) || HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0));
11736 }
11737 else
11738 {
11739#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
11740 Assert(!pVmxTransient->fWasGuestFPUStateActive);
11741#endif
11742 rc = CPUMR0Trap07Handler(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
11743 Assert(rc == VINF_EM_RAW_GUEST_TRAP || (rc == VINF_SUCCESS && CPUMIsGuestFPUStateActive(pVCpu)));
11744 }
11745
11746 HM_RESTORE_PREEMPT_IF_NEEDED();
11747 VMMRZCallRing3Enable(pVCpu);
11748
11749 if (rc == VINF_SUCCESS)
11750 {
11751 /* Guest FPU state was activated, we'll want to change CR0 FPU intercepts before the next VM-reentry. */
11752 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
11753 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
11754 pVCpu->hm.s.fUseGuestFpu = true;
11755 }
11756 else
11757 {
11758 /* Forward #NM to the guest. */
11759 Assert(rc == VINF_EM_RAW_GUEST_TRAP);
11760 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11761 AssertRCReturn(rc, rc);
11762 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11763 pVmxTransient->cbInstr, 0 /* error code */, 0 /* GCPtrFaultAddress */);
11764 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
11765 }
11766
11767 return VINF_SUCCESS;
11768}
11769
11770
11771/**
11772 * VM-exit exception handler for #GP (General-protection exception).
11773 *
11774 * @remarks Requires pVmxTransient->uExitIntInfo to be up-to-date.
11775 */
11776static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11777{
11778 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11779 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
11780
11781 int rc = VERR_INTERNAL_ERROR_5;
11782 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
11783 {
11784#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
11785 /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
11786 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11787 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11788 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11789 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11790 AssertRCReturn(rc, rc);
11791 Log4(("#GP Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pMixedCtx->cs.Sel, pMixedCtx->rip,
11792 pVmxTransient->uExitIntErrorCode, pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu), pMixedCtx->tr.Sel));
11793 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11794 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11795 return rc;
11796#else
11797 /* We don't intercept #GP. */
11798 AssertMsgFailed(("Unexpected VM-exit caused by #GP exception\n"));
11799 NOREF(pVmxTransient);
11800 return VERR_VMX_UNEXPECTED_EXCEPTION;
11801#endif
11802 }
11803
11804 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
11805 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
11806
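    /* Real-mode guest without unrestricted guest execution: the guest actually runs in V86 mode, so
       IOPL-sensitive instructions (CLI, STI, PUSHF, POPF, IRET, INT, ...) raise #GP and are emulated below. */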
11807 /* EMInterpretDisasCurrent() requires a lot of the state, save the entire state. */
11808 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11809 AssertRCReturn(rc, rc);
11810
11811 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
11812 uint32_t cbOp = 0;
11813 PVM pVM = pVCpu->CTX_SUFF(pVM);
11814 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
11815 if (RT_SUCCESS(rc))
11816 {
11817 rc = VINF_SUCCESS;
11818 Assert(cbOp == pDis->cbInstr);
11819 Log4(("#GP Disas OpCode=%u CS:EIP %04x:%04RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
11820 switch (pDis->pCurInstr->uOpcode)
11821 {
11822 case OP_CLI:
11823 {
11824 pMixedCtx->eflags.Bits.u1IF = 0;
11825 pMixedCtx->eflags.Bits.u1RF = 0;
11826 pMixedCtx->rip += pDis->cbInstr;
11827 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11828 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
11829 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
11830 break;
11831 }
11832
11833 case OP_STI:
11834 {
11835 bool fOldIF = pMixedCtx->eflags.Bits.u1IF;
11836 pMixedCtx->eflags.Bits.u1IF = 1;
11837 pMixedCtx->eflags.Bits.u1RF = 0;
11838 pMixedCtx->rip += pDis->cbInstr;
11839 if (!fOldIF)
11840 {
11841 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
11842 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
11843 }
11844 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11845 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
11846 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
11847 break;
11848 }
11849
11850 case OP_HLT:
11851 {
11852 rc = VINF_EM_HALT;
11853 pMixedCtx->rip += pDis->cbInstr;
11854 pMixedCtx->eflags.Bits.u1RF = 0;
11855 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11856 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
11857 break;
11858 }
11859
11860 case OP_POPF:
11861 {
11862 Log4(("POPF CS:EIP %04x:%04RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
11863 uint32_t cbParm;
11864 uint32_t uMask;
11865 bool fStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
11866 if (pDis->fPrefix & DISPREFIX_OPSIZE)
11867 {
11868 cbParm = 4;
11869 uMask = 0xffffffff;
11870 }
11871 else
11872 {
11873 cbParm = 2;
11874 uMask = 0xffff;
11875 }
11876
11877 /* Get the stack pointer & pop the contents of the stack onto Eflags. */
11878 RTGCPTR GCPtrStack = 0;
11879 X86EFLAGS Eflags;
11880 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
11881 &GCPtrStack);
11882 if (RT_SUCCESS(rc))
11883 {
11884 Assert(sizeof(Eflags.u32) >= cbParm);
11885 Eflags.u32 = 0;
11886 rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u32, cbParm);
11887 }
11888 if (RT_FAILURE(rc))
11889 {
11890 rc = VERR_EM_INTERPRETER;
11891 break;
11892 }
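                /* Merge the popped value into EFLAGS, touching only the bits POPF may modify at this CPL
                   (X86_EFL_POPF_BITS) and keeping RF clear. */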
11893 Log4(("POPF %#x -> %#RX64 mask=%#x RIP=%#RX64\n", Eflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
11894 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~((X86_EFL_POPF_BITS & uMask) | X86_EFL_RF))
11895 | (Eflags.u32 & X86_EFL_POPF_BITS & uMask);
11896 pMixedCtx->esp += cbParm;
11897 pMixedCtx->esp &= uMask;
11898 pMixedCtx->rip += pDis->cbInstr;
11899 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11900 | HM_CHANGED_GUEST_RSP
11901 | HM_CHANGED_GUEST_RFLAGS);
11902 /* Generate a pending-debug exception when stepping over POPF regardless of how POPF modifies EFLAGS.TF. */
11903 if (fStepping)
11904 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
11905
11906 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
11907 break;
11908 }
11909
11910 case OP_PUSHF:
11911 {
11912 uint32_t cbParm;
11913 uint32_t uMask;
11914 if (pDis->fPrefix & DISPREFIX_OPSIZE)
11915 {
11916 cbParm = 4;
11917 uMask = 0xffffffff;
11918 }
11919 else
11920 {
11921 cbParm = 2;
11922 uMask = 0xffff;
11923 }
11924
11925 /* Get the stack pointer & push the contents of eflags onto the stack. */
11926 RTGCPTR GCPtrStack = 0;
11927 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), (pMixedCtx->esp - cbParm) & uMask,
11928 SELMTOFLAT_FLAGS_CPL0, &GCPtrStack);
11929 if (RT_FAILURE(rc))
11930 {
11931 rc = VERR_EM_INTERPRETER;
11932 break;
11933 }
11934 X86EFLAGS Eflags = pMixedCtx->eflags;
11935                /* The RF & VM bits are cleared in the EFLAGS image stored on the stack; see the Intel instruction reference for PUSHF. */
11936 Eflags.Bits.u1RF = 0;
11937 Eflags.Bits.u1VM = 0;
11938
11939 rc = PGMPhysWrite(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u, cbParm);
11940 if (RT_FAILURE(rc))
11941 {
11942 rc = VERR_EM_INTERPRETER;
11943 break;
11944 }
11945 Log4(("PUSHF %#x -> %#RGv\n", Eflags.u, GCPtrStack));
11946 pMixedCtx->esp -= cbParm;
11947 pMixedCtx->esp &= uMask;
11948 pMixedCtx->rip += pDis->cbInstr;
11949 pMixedCtx->eflags.Bits.u1RF = 0;
11950 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11951 | HM_CHANGED_GUEST_RSP
11952 | HM_CHANGED_GUEST_RFLAGS);
11953 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
11954 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
11955 break;
11956 }
11957
11958 case OP_IRET:
11959 {
11960 /** @todo Handle 32-bit operand sizes and check stack limits. See Intel
11961 * instruction reference. */
11962 RTGCPTR GCPtrStack = 0;
11963 uint32_t uMask = 0xffff;
11964 bool fStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
11965 uint16_t aIretFrame[3];
11966 if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE))
11967 {
11968 rc = VERR_EM_INTERPRETER;
11969 break;
11970 }
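                /* Real-mode IRET: pop IP, CS and FLAGS as three 16-bit words off the guest stack, then load
                   them, merging only the POPF-modifiable flag bits. */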
11971 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
11972 &GCPtrStack);
11973 if (RT_SUCCESS(rc))
11974 rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &aIretFrame[0], sizeof(aIretFrame));
11975 if (RT_FAILURE(rc))
11976 {
11977 rc = VERR_EM_INTERPRETER;
11978 break;
11979 }
11980 pMixedCtx->eip = 0;
11981 pMixedCtx->ip = aIretFrame[0];
11982 pMixedCtx->cs.Sel = aIretFrame[1];
11983 pMixedCtx->cs.ValidSel = aIretFrame[1];
11984 pMixedCtx->cs.u64Base = (uint64_t)pMixedCtx->cs.Sel << 4;
11985 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF))
11986 | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
11987 pMixedCtx->sp += sizeof(aIretFrame);
11988 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11989 | HM_CHANGED_GUEST_SEGMENT_REGS
11990 | HM_CHANGED_GUEST_RSP
11991 | HM_CHANGED_GUEST_RFLAGS);
11992 /* Generate a pending-debug exception when stepping over IRET regardless of how IRET modifies EFLAGS.TF. */
11993 if (fStepping)
11994 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
11995                Log4(("IRET %#RGv to %04x:%04x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
11996 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
11997 break;
11998 }
11999
12000 case OP_INT:
12001 {
12002 uint16_t uVector = pDis->Param1.uValue & 0xff;
12003 hmR0VmxSetPendingIntN(pVCpu, pMixedCtx, uVector, pDis->cbInstr);
12004 /* INT clears EFLAGS.TF, we mustn't set any pending debug exceptions here. */
12005 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
12006 break;
12007 }
12008
12009 case OP_INTO:
12010 {
12011 if (pMixedCtx->eflags.Bits.u1OF)
12012 {
12013 hmR0VmxSetPendingXcptOF(pVCpu, pMixedCtx, pDis->cbInstr);
12014 /* INTO clears EFLAGS.TF, we mustn't set any pending debug exceptions here. */
12015 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
12016 }
12017 else
12018 {
12019 pMixedCtx->eflags.Bits.u1RF = 0;
12020 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
12021 }
12022 break;
12023 }
12024
12025 default:
12026 {
12027 pMixedCtx->eflags.Bits.u1RF = 0; /* This is correct most of the time... */
12028 VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pMixedCtx), 0 /* pvFault */,
12029 EMCODETYPE_SUPERVISOR);
12030 rc = VBOXSTRICTRC_VAL(rc2);
12031 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
12032 /** @todo We have to set pending-debug exceptions here when the guest is
12033 * single-stepping depending on the instruction that was interpreted. */
12034 Log4(("#GP rc=%Rrc\n", rc));
12035 break;
12036 }
12037 }
12038 }
12039 else
12040 rc = VERR_EM_INTERPRETER;
12041
12042 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_EM_HALT,
12043 ("#GP Unexpected rc=%Rrc\n", rc));
12044 return rc;
12045}
12046
12047
12048#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
12049/**
12050 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
12051 * the exception reported in the VMX transient structure back into the VM.
12052 *
12053 * @remarks Requires uExitIntInfo in the VMX transient structure to be
12054 * up-to-date.
12055 */
12056static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12057{
12058 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
12059
12060 /* Re-inject the exception into the guest. This cannot be a double-fault condition which would have been handled in
12061 hmR0VmxCheckExitDueToEventDelivery(). */
12062 int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
12063 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12064 AssertRCReturn(rc, rc);
12065 Assert(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);
12066
12067#ifdef DEBUG_ramshankar
12068 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
12069 uint8_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
12070    Log(("hmR0VmxExitXcptGeneric: Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pMixedCtx->cs.Sel, pMixedCtx->rip));
12071#endif
12072
12073 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12074 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
12075 return VINF_SUCCESS;
12076}
12077#endif
12078
12079
12080/**
12081 * VM-exit exception handler for #PF (Page-fault exception).
12082 */
12083static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12084{
12085 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
12086 PVM pVM = pVCpu->CTX_SUFF(pVM);
12087 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12088 rc |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
12089 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
12090 AssertRCReturn(rc, rc);
12091
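    /* With nested paging the guest normally handles its own page faults and #PF is not intercepted; the block
       below only applies when the HMVMX_ALWAYS_TRAP_* debug options force the intercept, in which case the
       fault is reflected back to the guest (or turned into #DF for a vectoring #PF). */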
12092#if defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) || defined(HMVMX_ALWAYS_TRAP_PF)
12093 if (pVM->hm.s.fNestedPaging)
12094 {
12095 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
12096 if (RT_LIKELY(!pVmxTransient->fVectoringDoublePF))
12097 {
12098 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
12099 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12100 0 /* cbInstr */, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQualification);
12101 }
12102 else
12103 {
12104 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
12105 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
12106 Log4(("Pending #DF due to vectoring #PF. NP\n"));
12107 }
12108 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
12109 return rc;
12110 }
12111#else
12112 Assert(!pVM->hm.s.fNestedPaging);
12113 NOREF(pVM);
12114#endif
12115
12116    /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable of
12117       differentiating between a #PF caused by instruction emulation and one caused by event injection. See @bugref{6607}. */
12118 if (pVmxTransient->fVectoringPF)
12119 {
12120 Assert(pVCpu->hm.s.Event.fPending);
12121 return VINF_EM_RAW_INJECT_TRPM_EVENT;
12122 }
12123
12124 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
12125 AssertRCReturn(rc, rc);
12126
12127 Log4(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification,
12128 pMixedCtx->cs.Sel, pMixedCtx->rip, pVmxTransient->uExitIntErrorCode, pMixedCtx->cr3));
12129
12130 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
12131 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pMixedCtx),
12132 (RTGCPTR)pVmxTransient->uExitQualification);
12133
12134 Log4(("#PF: rc=%Rrc\n", rc));
12135 if (rc == VINF_SUCCESS)
12136 {
12137        /* Successfully synced shadow page tables or emulated an MMIO instruction. */
12138 /** @todo this isn't quite right, what if guest does lgdt with some MMIO
12139 * memory? We don't update the whole state here... */
12140 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
12141 | HM_CHANGED_GUEST_RSP
12142 | HM_CHANGED_GUEST_RFLAGS
12143 | HM_CHANGED_VMX_GUEST_APIC_STATE);
12144 TRPMResetTrap(pVCpu);
12145 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
12146 return rc;
12147 }
12148
12149 if (rc == VINF_EM_RAW_GUEST_TRAP)
12150 {
12151 if (!pVmxTransient->fVectoringDoublePF)
12152 {
12153 /* It's a guest page fault and needs to be reflected to the guest. */
12154 uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
12155 TRPMResetTrap(pVCpu);
12156 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
12157 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
12158 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12159 0 /* cbInstr */, uGstErrorCode, pVmxTransient->uExitQualification);
12160 }
12161 else
12162 {
12163 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
12164 TRPMResetTrap(pVCpu);
12165 pVCpu->hm.s.Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
12166 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
12167 Log4(("#PF: Pending #DF due to vectoring #PF\n"));
12168 }
12169
12170 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
12171 return VINF_SUCCESS;
12172 }
12173
12174 TRPMResetTrap(pVCpu);
12175 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
12176 return rc;
12177}
12178
12179/** @} */
12180