VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@97220

Last change on this file since 97220 was 97220, checked in by vboxsync, 2 years ago

VMM/CPUM: Access CPUMCTX::eflags via the 'u' member when possible in preparation for putting internal info in the reserved bits.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 98.3 KB
 
1/* $Id: CPUMAllRegs.cpp 97220 2022-10-18 22:50:03Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.alldomusa.eu.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_CPUM
33#include <VBox/vmm/cpum.h>
34#include <VBox/vmm/dbgf.h>
35#include <VBox/vmm/apic.h>
36#include <VBox/vmm/pgm.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/em.h>
39#include <VBox/vmm/nem.h>
40#include <VBox/vmm/hm.h>
41#include "CPUMInternal.h"
42#include <VBox/vmm/vmcc.h>
43#include <VBox/err.h>
44#include <VBox/dis.h>
45#include <VBox/log.h>
46#include <VBox/vmm/hm.h>
47#include <VBox/vmm/tm.h>
48#include <iprt/assert.h>
49#include <iprt/asm.h>
50#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
51# include <iprt/asm-amd64-x86.h>
52#endif
53#ifdef IN_RING3
54# include <iprt/thread.h>
55#endif
56
57/** Disable stack frame pointer generation here. */
58#if defined(_MSC_VER) && !defined(DEBUG) && defined(RT_ARCH_X86)
59# pragma optimize("y", off)
60#endif
61
62AssertCompile2MemberOffsets(VM, cpum.s.GuestFeatures, cpum.ro.GuestFeatures);
63
64
65/*********************************************************************************************************************************
66* Defined Constants And Macros *
67*********************************************************************************************************************************/
68/**
69 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
70 *
71 * @returns Pointer to the Virtual CPU.
72 * @param a_pGuestCtx Pointer to the guest context.
73 */
74#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
75
76/**
77 * Lazily loads the hidden parts of a selector register when using raw-mode.
78 */
79#define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
80 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg))
81
82/** @def CPUM_INT_ASSERT_NOT_EXTRN
 83 * Macro for asserting that the state parts in @a a_fNotExtrn are present (not external).
84 *
85 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
86 * @param a_fNotExtrn Mask of CPUMCTX_EXTRN_XXX bits to check.
87 */
88#define CPUM_INT_ASSERT_NOT_EXTRN(a_pVCpu, a_fNotExtrn) \
89 AssertMsg(!((a_pVCpu)->cpum.s.Guest.fExtrn & (a_fNotExtrn)), \
90 ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.s.Guest.fExtrn, (a_fNotExtrn)))
91
92
93VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
94{
95 pVCpu->cpum.s.Hyper.cr3 = cr3;
96}
97
98VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
99{
100 return pVCpu->cpum.s.Hyper.cr3;
101}
102
103
104/** @def MAYBE_LOAD_DRx
105 * Macro for updating DRx values in raw-mode and ring-0 contexts.
106 */
107#ifdef IN_RING0
108# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { a_fnLoad(a_uValue); } while (0)
109#else
110# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
111#endif
112
113VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
114{
115 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
116 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
117}
118
119
120VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
121{
122 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
123 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
124}
125
126
127VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
128{
129 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
130 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
131}
132
133
134VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
135{
136 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
137 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
138}
139
140
141VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
142{
143 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
144}
145
146
147VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
148{
149 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
150}
151
152
153VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
154{
155 return pVCpu->cpum.s.Hyper.dr[0];
156}
157
158
159VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
160{
161 return pVCpu->cpum.s.Hyper.dr[1];
162}
163
164
165VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
166{
167 return pVCpu->cpum.s.Hyper.dr[2];
168}
169
170
171VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
172{
173 return pVCpu->cpum.s.Hyper.dr[3];
174}
175
176
177VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
178{
179 return pVCpu->cpum.s.Hyper.dr[6];
180}
181
182
183VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
184{
185 return pVCpu->cpum.s.Hyper.dr[7];
186}
187
188
189/**
190 * Queries the pointer to the internal CPUMCTX structure.
191 *
192 * @returns The CPUMCTX pointer.
193 * @param pVCpu The cross context virtual CPU structure.
194 */
195VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
196{
197 return &pVCpu->cpum.s.Guest;
198}
199
200
201/**
202 * Queries the pointer to the internal CPUMCTXMSRS structure.
203 *
204 * This is for NEM only.
205 *
 206 * @returns The CPUMCTXMSRS pointer.
207 * @param pVCpu The cross context virtual CPU structure.
208 */
209VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu)
210{
211 return &pVCpu->cpum.s.GuestMsrs;
212}
213
214
215VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
216{
217 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
218 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
219 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_GDTR;
220 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
221 return VINF_SUCCESS; /* formality, consider it void. */
222}
223
224
225VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
226{
227 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
228 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
229 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_IDTR;
230 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
231 return VINF_SUCCESS; /* formality, consider it void. */
232}
233
234
235VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
236{
237 pVCpu->cpum.s.Guest.tr.Sel = tr;
238 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
239 return VINF_SUCCESS; /* formality, consider it void. */
240}
241
242
243VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
244{
245 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
246 /* The caller will set more hidden bits if it has them. */
247 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
248 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
249 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
250 return VINF_SUCCESS; /* formality, consider it void. */
251}
252
253
254/**
255 * Set the guest CR0.
256 *
257 * When called in GC, the hyper CR0 may be updated if that is
258 * required. The caller only has to take special action if AM,
259 * WP, PG or PE changes.
260 *
261 * @returns VINF_SUCCESS (consider it void).
262 * @param pVCpu The cross context virtual CPU structure.
263 * @param cr0 The new CR0 value.
264 */
265VMMDECL(int) CPUMSetGuestCR0(PVMCPUCC pVCpu, uint64_t cr0)
266{
267 /*
268 * Check for changes causing TLB flushes (for REM).
269 * The caller is responsible for calling PGM when appropriate.
270 */
271 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
272 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
273 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
274 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
275
276 /*
277 * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
278 */
279 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
280 PGMCr0WpEnabled(pVCpu);
281
282 /* The ET flag is settable on a 386 and hardwired on 486+. */
283 if ( !(cr0 & X86_CR0_ET)
284 && pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386)
285 cr0 |= X86_CR0_ET;
286
287 pVCpu->cpum.s.Guest.cr0 = cr0;
288 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR0;
289 return VINF_SUCCESS;
290}
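/*
 * Illustrative sketch of the TLB-flush condition in CPUMSetGuestCR0 above,
 * assuming a caller that only toggles CR0.WP (all identifiers are the ones
 * already used in this file):
 *
 *      uint64_t const fOldCr0 = pVCpu->cpum.s.Guest.cr0;       // say PE and PG set, WP clear
 *      uint64_t const fNewCr0 = fOldCr0 | X86_CR0_WP;          // enable write protection
 *      // (fNewCr0 ^ fOldCr0) & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE) is non-zero, so the
 *      // call below sets CPUM_CHANGED_GLOBAL_TLB_FLUSH and, since WP goes 0 -> 1,
 *      // also notifies PGM via PGMCr0WpEnabled().
 *      CPUMSetGuestCR0(pVCpu, fNewCr0);
 */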
291
292
293VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
294{
295 pVCpu->cpum.s.Guest.cr2 = cr2;
296 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR2;
297 return VINF_SUCCESS;
298}
299
300
301VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
302{
303 pVCpu->cpum.s.Guest.cr3 = cr3;
304 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
305 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR3;
306 return VINF_SUCCESS;
307}
308
309
310VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
311{
312 /* Note! We don't bother with OSXSAVE and legacy CPUID patches. */
313
314 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
315 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
316 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
317
318 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
319 pVCpu->cpum.s.Guest.cr4 = cr4;
320 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR4;
321 return VINF_SUCCESS;
322}
323
324
325VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
326{
327 pVCpu->cpum.s.Guest.eflags.u = eflags;
328 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
329 return VINF_SUCCESS;
330}
331
332
333VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
334{
335 pVCpu->cpum.s.Guest.eip = eip;
336 return VINF_SUCCESS;
337}
338
339
340VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
341{
342 pVCpu->cpum.s.Guest.eax = eax;
343 return VINF_SUCCESS;
344}
345
346
347VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
348{
349 pVCpu->cpum.s.Guest.ebx = ebx;
350 return VINF_SUCCESS;
351}
352
353
354VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
355{
356 pVCpu->cpum.s.Guest.ecx = ecx;
357 return VINF_SUCCESS;
358}
359
360
361VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
362{
363 pVCpu->cpum.s.Guest.edx = edx;
364 return VINF_SUCCESS;
365}
366
367
368VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
369{
370 pVCpu->cpum.s.Guest.esp = esp;
371 return VINF_SUCCESS;
372}
373
374
375VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
376{
377 pVCpu->cpum.s.Guest.ebp = ebp;
378 return VINF_SUCCESS;
379}
380
381
382VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
383{
384 pVCpu->cpum.s.Guest.esi = esi;
385 return VINF_SUCCESS;
386}
387
388
389VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
390{
391 pVCpu->cpum.s.Guest.edi = edi;
392 return VINF_SUCCESS;
393}
394
395
396VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
397{
398 pVCpu->cpum.s.Guest.ss.Sel = ss;
399 return VINF_SUCCESS;
400}
401
402
403VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
404{
405 pVCpu->cpum.s.Guest.cs.Sel = cs;
406 return VINF_SUCCESS;
407}
408
409
410VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
411{
412 pVCpu->cpum.s.Guest.ds.Sel = ds;
413 return VINF_SUCCESS;
414}
415
416
417VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
418{
419 pVCpu->cpum.s.Guest.es.Sel = es;
420 return VINF_SUCCESS;
421}
422
423
424VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
425{
426 pVCpu->cpum.s.Guest.fs.Sel = fs;
427 return VINF_SUCCESS;
428}
429
430
431VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
432{
433 pVCpu->cpum.s.Guest.gs.Sel = gs;
434 return VINF_SUCCESS;
435}
436
437
438VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
439{
440 pVCpu->cpum.s.Guest.msrEFER = val;
441 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_EFER;
442}
443
444
445VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PCVMCPU pVCpu, uint16_t *pcbLimit)
446{
447 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_IDTR);
448 if (pcbLimit)
449 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
450 return pVCpu->cpum.s.Guest.idtr.pIdt;
451}
452
453
454VMMDECL(RTSEL) CPUMGetGuestTR(PCVMCPU pVCpu, PCPUMSELREGHID pHidden)
455{
456 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_TR);
457 if (pHidden)
458 *pHidden = pVCpu->cpum.s.Guest.tr;
459 return pVCpu->cpum.s.Guest.tr.Sel;
460}
461
462
463VMMDECL(RTSEL) CPUMGetGuestCS(PCVMCPU pVCpu)
464{
465 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CS);
466 return pVCpu->cpum.s.Guest.cs.Sel;
467}
468
469
470VMMDECL(RTSEL) CPUMGetGuestDS(PCVMCPU pVCpu)
471{
472 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DS);
473 return pVCpu->cpum.s.Guest.ds.Sel;
474}
475
476
477VMMDECL(RTSEL) CPUMGetGuestES(PCVMCPU pVCpu)
478{
479 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_ES);
480 return pVCpu->cpum.s.Guest.es.Sel;
481}
482
483
484VMMDECL(RTSEL) CPUMGetGuestFS(PCVMCPU pVCpu)
485{
486 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_FS);
487 return pVCpu->cpum.s.Guest.fs.Sel;
488}
489
490
491VMMDECL(RTSEL) CPUMGetGuestGS(PCVMCPU pVCpu)
492{
493 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_GS);
494 return pVCpu->cpum.s.Guest.gs.Sel;
495}
496
497
498VMMDECL(RTSEL) CPUMGetGuestSS(PCVMCPU pVCpu)
499{
500 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SS);
501 return pVCpu->cpum.s.Guest.ss.Sel;
502}
503
504
505VMMDECL(uint64_t) CPUMGetGuestFlatPC(PVMCPU pVCpu)
506{
507 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
508 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
509 if ( !CPUMIsGuestInLongMode(pVCpu)
510 || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
511 return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.cs.u64Base;
512 return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.cs.u64Base;
513}
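/*
 * Worked example for CPUMGetGuestFlatPC (values are made up): with a 32-bit
 * code segment whose base is 0x00010000 and EIP = 0x00002000, the flat PC is
 * 0x00012000.  In 64-bit code the CS base is architecturally treated as zero,
 * so the result is effectively just RIP.
 *
 *      uint64_t const uFlatPc = CPUMGetGuestFlatPC(pVCpu);     // 0x00012000 in the example above
 */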
514
515
516VMMDECL(uint64_t) CPUMGetGuestFlatSP(PVMCPU pVCpu)
517{
518 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
519 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
520 if ( !CPUMIsGuestInLongMode(pVCpu)
521 || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
522 return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.ss.u64Base;
523 return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.ss.u64Base;
524}
525
526
527VMMDECL(RTSEL) CPUMGetGuestLDTR(PCVMCPU pVCpu)
528{
529 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_LDTR);
530 return pVCpu->cpum.s.Guest.ldtr.Sel;
531}
532
533
534VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PCVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
535{
536 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_LDTR);
537 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
538 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
539 return pVCpu->cpum.s.Guest.ldtr.Sel;
540}
541
542
543VMMDECL(uint64_t) CPUMGetGuestCR0(PCVMCPU pVCpu)
544{
545 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
546 return pVCpu->cpum.s.Guest.cr0;
547}
548
549
550VMMDECL(uint64_t) CPUMGetGuestCR2(PCVMCPU pVCpu)
551{
552 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR2);
553 return pVCpu->cpum.s.Guest.cr2;
554}
555
556
557VMMDECL(uint64_t) CPUMGetGuestCR3(PCVMCPU pVCpu)
558{
559 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
560 return pVCpu->cpum.s.Guest.cr3;
561}
562
563
564VMMDECL(uint64_t) CPUMGetGuestCR4(PCVMCPU pVCpu)
565{
566 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
567 return pVCpu->cpum.s.Guest.cr4;
568}
569
570
571VMMDECL(uint64_t) CPUMGetGuestCR8(PCVMCPUCC pVCpu)
572{
573 uint64_t u64;
574 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
575 if (RT_FAILURE(rc))
576 u64 = 0;
577 return u64;
578}
579
580
581VMMDECL(void) CPUMGetGuestGDTR(PCVMCPU pVCpu, PVBOXGDTR pGDTR)
582{
583 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_GDTR);
584 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
585}
586
587
588VMMDECL(uint32_t) CPUMGetGuestEIP(PCVMCPU pVCpu)
589{
590 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
591 return pVCpu->cpum.s.Guest.eip;
592}
593
594
595VMMDECL(uint64_t) CPUMGetGuestRIP(PCVMCPU pVCpu)
596{
597 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
598 return pVCpu->cpum.s.Guest.rip;
599}
600
601
602VMMDECL(uint32_t) CPUMGetGuestEAX(PCVMCPU pVCpu)
603{
604 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RAX);
605 return pVCpu->cpum.s.Guest.eax;
606}
607
608
609VMMDECL(uint32_t) CPUMGetGuestEBX(PCVMCPU pVCpu)
610{
611 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RBX);
612 return pVCpu->cpum.s.Guest.ebx;
613}
614
615
616VMMDECL(uint32_t) CPUMGetGuestECX(PCVMCPU pVCpu)
617{
618 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RCX);
619 return pVCpu->cpum.s.Guest.ecx;
620}
621
622
623VMMDECL(uint32_t) CPUMGetGuestEDX(PCVMCPU pVCpu)
624{
625 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RDX);
626 return pVCpu->cpum.s.Guest.edx;
627}
628
629
630VMMDECL(uint32_t) CPUMGetGuestESI(PCVMCPU pVCpu)
631{
632 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSI);
633 return pVCpu->cpum.s.Guest.esi;
634}
635
636
637VMMDECL(uint32_t) CPUMGetGuestEDI(PCVMCPU pVCpu)
638{
639 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RDI);
640 return pVCpu->cpum.s.Guest.edi;
641}
642
643
644VMMDECL(uint32_t) CPUMGetGuestESP(PCVMCPU pVCpu)
645{
646 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSP);
647 return pVCpu->cpum.s.Guest.esp;
648}
649
650
651VMMDECL(uint32_t) CPUMGetGuestEBP(PCVMCPU pVCpu)
652{
653 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RBP);
654 return pVCpu->cpum.s.Guest.ebp;
655}
656
657
658VMMDECL(uint32_t) CPUMGetGuestEFlags(PCVMCPU pVCpu)
659{
660 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RFLAGS);
661 return pVCpu->cpum.s.Guest.eflags.u;
662}
663
664
665VMMDECL(int) CPUMGetGuestCRx(PCVMCPUCC pVCpu, unsigned iReg, uint64_t *pValue)
666{
667 switch (iReg)
668 {
669 case DISCREG_CR0:
670 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
671 *pValue = pVCpu->cpum.s.Guest.cr0;
672 break;
673
674 case DISCREG_CR2:
675 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR2);
676 *pValue = pVCpu->cpum.s.Guest.cr2;
677 break;
678
679 case DISCREG_CR3:
680 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
681 *pValue = pVCpu->cpum.s.Guest.cr3;
682 break;
683
684 case DISCREG_CR4:
685 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
686 *pValue = pVCpu->cpum.s.Guest.cr4;
687 break;
688
689 case DISCREG_CR8:
690 {
691 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
692 uint8_t u8Tpr;
693 int rc = APICGetTpr(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
694 if (RT_FAILURE(rc))
695 {
696 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
697 *pValue = 0;
698 return rc;
699 }
 700 *pValue = u8Tpr >> 4; /* Bits 7-4 of the TPR contain the task priority that goes into CR8 bits 3-0. */
701 break;
702 }
703
704 default:
705 return VERR_INVALID_PARAMETER;
706 }
707 return VINF_SUCCESS;
708}
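/*
 * Sketch of reading CR8 through CPUMGetGuestCRx (the TPR value is made up):
 * if the APIC TPR holds 0x50, bits 7-4 yield priority class 5, which is what
 * ends up in *pValue for DISCREG_CR8.
 *
 *      uint64_t uCr8 = 0;
 *      int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &uCr8);    // uCr8 == 5 when TPR == 0x50
 *      if (RT_SUCCESS(rc))
 *          Log2(("guest CR8=%#RX64\n", uCr8));
 */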
709
710
711VMMDECL(uint64_t) CPUMGetGuestDR0(PCVMCPU pVCpu)
712{
713 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
714 return pVCpu->cpum.s.Guest.dr[0];
715}
716
717
718VMMDECL(uint64_t) CPUMGetGuestDR1(PCVMCPU pVCpu)
719{
720 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
721 return pVCpu->cpum.s.Guest.dr[1];
722}
723
724
725VMMDECL(uint64_t) CPUMGetGuestDR2(PCVMCPU pVCpu)
726{
727 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
728 return pVCpu->cpum.s.Guest.dr[2];
729}
730
731
732VMMDECL(uint64_t) CPUMGetGuestDR3(PCVMCPU pVCpu)
733{
734 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
735 return pVCpu->cpum.s.Guest.dr[3];
736}
737
738
739VMMDECL(uint64_t) CPUMGetGuestDR6(PCVMCPU pVCpu)
740{
741 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR6);
742 return pVCpu->cpum.s.Guest.dr[6];
743}
744
745
746VMMDECL(uint64_t) CPUMGetGuestDR7(PCVMCPU pVCpu)
747{
748 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR7);
749 return pVCpu->cpum.s.Guest.dr[7];
750}
751
752
753VMMDECL(int) CPUMGetGuestDRx(PCVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
754{
755 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR_MASK);
756 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
757 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
758 if (iReg == 4 || iReg == 5)
759 iReg += 2;
760 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
761 return VINF_SUCCESS;
762}
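/*
 * The DR4/DR5 aliasing above means the following two calls read the very same
 * register (illustrative only):
 *
 *      uint64_t uDr6ViaAlias, uDr6;
 *      CPUMGetGuestDRx(pVCpu, 4, &uDr6ViaAlias);   // DR4 aliases DR6
 *      CPUMGetGuestDRx(pVCpu, 6, &uDr6);
 *      Assert(uDr6ViaAlias == uDr6);
 */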
763
764
765VMMDECL(uint64_t) CPUMGetGuestEFER(PCVMCPU pVCpu)
766{
767 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
768 return pVCpu->cpum.s.Guest.msrEFER;
769}
770
771
772/**
773 * Looks up a CPUID leaf in the CPUID leaf array, no subleaf.
774 *
775 * @returns Pointer to the leaf if found, NULL if not.
776 *
777 * @param pVM The cross context VM structure.
778 * @param uLeaf The leaf to get.
779 */
780PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf)
781{
782 unsigned iEnd = RT_MIN(pVM->cpum.s.GuestInfo.cCpuIdLeaves, RT_ELEMENTS(pVM->cpum.s.GuestInfo.aCpuIdLeaves));
783 if (iEnd)
784 {
785 unsigned iStart = 0;
786 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.aCpuIdLeaves;
787 for (;;)
788 {
789 unsigned i = iStart + (iEnd - iStart) / 2U;
790 if (uLeaf < paLeaves[i].uLeaf)
791 {
792 if (i <= iStart)
793 return NULL;
794 iEnd = i;
795 }
796 else if (uLeaf > paLeaves[i].uLeaf)
797 {
798 i += 1;
799 if (i >= iEnd)
800 return NULL;
801 iStart = i;
802 }
803 else
804 {
805 if (RT_LIKELY(paLeaves[i].fSubLeafMask == 0 && paLeaves[i].uSubLeaf == 0))
806 return &paLeaves[i];
807
 808 /* This shouldn't normally happen. But in case it does due
 809 to user configuration overrides or something, just return the
810 first sub-leaf. */
811 AssertMsgFailed(("uLeaf=%#x fSubLeafMask=%#x uSubLeaf=%#x\n",
812 uLeaf, paLeaves[i].fSubLeafMask, paLeaves[i].uSubLeaf));
813 while ( paLeaves[i].uSubLeaf != 0
814 && i > 0
815 && uLeaf == paLeaves[i - 1].uLeaf)
816 i--;
817 return &paLeaves[i];
818 }
819 }
820 }
821
822 return NULL;
823}
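/*
 * Minimal usage sketch for the lookup above, assuming the standard leaf 1 is
 * present in the array.  The loop is a plain binary search, which relies on
 * aCpuIdLeaves being sorted by uLeaf (and by uSubLeaf within a leaf).
 *
 *      PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, 1);
 *      if (pLeaf)
 *          Log2(("leaf 1: eax=%#x ebx=%#x ecx=%#x edx=%#x\n",
 *                pLeaf->uEax, pLeaf->uEbx, pLeaf->uEcx, pLeaf->uEdx));
 */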
824
825
826/**
827 * Looks up a CPUID leaf in the CPUID leaf array.
828 *
829 * @returns Pointer to the leaf if found, NULL if not.
830 *
831 * @param pVM The cross context VM structure.
832 * @param uLeaf The leaf to get.
833 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
834 * isn't.
835 * @param pfExactSubLeafHit Whether we've got an exact subleaf hit or not.
836 */
837PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit)
838{
839 unsigned iEnd = RT_MIN(pVM->cpum.s.GuestInfo.cCpuIdLeaves, RT_ELEMENTS(pVM->cpum.s.GuestInfo.aCpuIdLeaves));
840 if (iEnd)
841 {
842 unsigned iStart = 0;
843 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.aCpuIdLeaves;
844 for (;;)
845 {
846 unsigned i = iStart + (iEnd - iStart) / 2U;
847 if (uLeaf < paLeaves[i].uLeaf)
848 {
849 if (i <= iStart)
850 return NULL;
851 iEnd = i;
852 }
853 else if (uLeaf > paLeaves[i].uLeaf)
854 {
855 i += 1;
856 if (i >= iEnd)
857 return NULL;
858 iStart = i;
859 }
860 else
861 {
862 uSubLeaf &= paLeaves[i].fSubLeafMask;
863 if (uSubLeaf == paLeaves[i].uSubLeaf)
864 *pfExactSubLeafHit = true;
865 else
866 {
867 /* Find the right subleaf. We return the last one before
868 uSubLeaf if we don't find an exact match. */
869 if (uSubLeaf < paLeaves[i].uSubLeaf)
870 while ( i > 0
871 && uLeaf == paLeaves[i - 1].uLeaf
872 && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
873 i--;
874 else
875 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
876 && uLeaf == paLeaves[i + 1].uLeaf
877 && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
878 i++;
879 *pfExactSubLeafHit = uSubLeaf == paLeaves[i].uSubLeaf;
880 }
881 return &paLeaves[i];
882 }
883 }
884 }
885
886 *pfExactSubLeafHit = false;
887 return NULL;
888}
889
890
891/**
892 * Gets a CPUID leaf.
893 *
894 * @param pVCpu The cross context virtual CPU structure.
895 * @param uLeaf The CPUID leaf to get.
896 * @param uSubLeaf The CPUID sub-leaf to get, if applicable.
 897 * @param f64BitMode A tristate indicating whether the caller is in 64-bit mode or
 898 * not: 1=true, 0=false, -1=whatever. This affects how the
899 * X86_CPUID_EXT_FEATURE_EDX_SYSCALL flag is returned on
900 * Intel CPUs, where it's only returned in 64-bit mode.
901 * @param pEax Where to store the EAX value.
902 * @param pEbx Where to store the EBX value.
903 * @param pEcx Where to store the ECX value.
904 * @param pEdx Where to store the EDX value.
905 */
906VMMDECL(void) CPUMGetGuestCpuId(PVMCPUCC pVCpu, uint32_t uLeaf, uint32_t uSubLeaf, int f64BitMode,
907 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
908{
909 bool fExactSubLeafHit;
910 PVM pVM = pVCpu->CTX_SUFF(pVM);
911 PCCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, uLeaf, uSubLeaf, &fExactSubLeafHit);
912 if (pLeaf)
913 {
914 AssertMsg(pLeaf->uLeaf == uLeaf, ("%#x %#x\n", pLeaf->uLeaf, uLeaf));
915 if (fExactSubLeafHit)
916 {
917 *pEax = pLeaf->uEax;
918 *pEbx = pLeaf->uEbx;
919 *pEcx = pLeaf->uEcx;
920 *pEdx = pLeaf->uEdx;
921
922 /*
923 * Deal with CPU specific information.
924 */
925 if (pLeaf->fFlags & ( CPUMCPUIDLEAF_F_CONTAINS_APIC_ID
926 | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE
927 | CPUMCPUIDLEAF_F_CONTAINS_APIC ))
928 {
929 if (uLeaf == 1)
930 {
931 /* EBX: Bits 31-24: Initial APIC ID. */
932 Assert(pVCpu->idCpu <= 255);
933 AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
934 *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);
935
936 /* EDX: Bit 9: AND with APICBASE.EN. */
937 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
938 *pEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
939
940 /* ECX: Bit 27: CR4.OSXSAVE mirror. */
941 *pEcx = (pLeaf->uEcx & ~X86_CPUID_FEATURE_ECX_OSXSAVE)
942 | (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE ? X86_CPUID_FEATURE_ECX_OSXSAVE : 0);
943 }
944 else if (uLeaf == 0xb)
945 {
946 /* EDX: Initial extended APIC ID. */
947 AssertMsg(pLeaf->uEdx == 0, ("%#x\n", pLeaf->uEdx)); /* raw-mode assumption */
948 *pEdx = pVCpu->idCpu;
949 Assert(!(pLeaf->fFlags & ~(CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)));
950 }
951 else if (uLeaf == UINT32_C(0x8000001e))
952 {
953 /* EAX: Initial extended APIC ID. */
954 AssertMsg(pLeaf->uEax == 0, ("%#x\n", pLeaf->uEax)); /* raw-mode assumption */
955 *pEax = pVCpu->idCpu;
956 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC_ID));
957 }
958 else if (uLeaf == UINT32_C(0x80000001))
959 {
960 /* EDX: Bit 9: AND with APICBASE.EN. */
961 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible)
962 *pEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
963 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC));
964 }
965 else
966 AssertMsgFailed(("uLeaf=%#x\n", uLeaf));
967 }
968
 969 /* Intel CPUs suppress the SYSCALL bit when not executing in 64-bit mode: */
970 if ( uLeaf == UINT32_C(0x80000001)
971 && f64BitMode == false
972 && (*pEdx & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
973 && ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL
974 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_VIA /*?*/
975 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_SHANGHAI /*?*/ ) )
976 *pEdx &= ~X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
977
978 }
979 /*
 980 * Out-of-range sub-leaves aren't quite as easy and pretty to emulate,
 981 * but we do the best we can here...
982 */
983 else
984 {
985 *pEax = *pEbx = *pEcx = *pEdx = 0;
986 if (pLeaf->fFlags & CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)
987 {
988 *pEcx = uSubLeaf & 0xff;
989 *pEdx = pVCpu->idCpu;
990 }
991 }
992 }
993 else
994 {
995 /*
996 * Different CPUs have different ways of dealing with unknown CPUID leaves.
997 */
998 switch (pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod)
999 {
1000 default:
1001 AssertFailed();
1002 RT_FALL_THRU();
1003 case CPUMUNKNOWNCPUID_DEFAULTS:
1004 case CPUMUNKNOWNCPUID_LAST_STD_LEAF: /* ASSUME this is executed */
1005 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: /** @todo Implement CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX */
1006 *pEax = pVM->cpum.s.GuestInfo.DefCpuId.uEax;
1007 *pEbx = pVM->cpum.s.GuestInfo.DefCpuId.uEbx;
1008 *pEcx = pVM->cpum.s.GuestInfo.DefCpuId.uEcx;
1009 *pEdx = pVM->cpum.s.GuestInfo.DefCpuId.uEdx;
1010 break;
1011 case CPUMUNKNOWNCPUID_PASSTHRU:
1012 *pEax = uLeaf;
1013 *pEbx = 0;
1014 *pEcx = uSubLeaf;
1015 *pEdx = 0;
1016 break;
1017 }
1018 }
1019 Log2(("CPUMGetGuestCpuId: uLeaf=%#010x/%#010x %RX32 %RX32 %RX32 %RX32\n", uLeaf, uSubLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1020}
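/*
 * Usage sketch for CPUMGetGuestCpuId, assuming the caller does not care about
 * the Intel SYSCALL quirk and therefore passes -1 ("whatever") for f64BitMode:
 *
 *      uint32_t uEax, uEbx, uEcx, uEdx;
 *      CPUMGetGuestCpuId(pVCpu, 1, 0, -1, &uEax, &uEbx, &uEcx, &uEdx);
 *      uint8_t const idInitialApic = (uint8_t)(uEbx >> 24);    // patched to pVCpu->idCpu above
 */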
1021
1022
1023/**
1024 * Sets the visibility of the X86_CPUID_FEATURE_EDX_APIC and
1025 * X86_CPUID_AMD_FEATURE_EDX_APIC CPUID bits.
1026 *
1027 * @returns Previous value.
1028 * @param pVCpu The cross context virtual CPU structure to make the
1029 * change on. Usually the calling EMT.
1030 * @param fVisible Whether to make it visible (true) or hide it (false).
1031 *
1032 * @remarks This is "VMMDECL" so that it still links with
1033 * the old APIC code which is in VBoxDD2 and not in
1034 * the VMM module.
1035 */
1036VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible)
1037{
1038 bool fOld = pVCpu->cpum.s.fCpuIdApicFeatureVisible;
1039 pVCpu->cpum.s.fCpuIdApicFeatureVisible = fVisible;
1040 return fOld;
1041}
1042
1043
1044/**
1045 * Gets the host CPU vendor.
1046 *
1047 * @returns CPU vendor.
1048 * @param pVM The cross context VM structure.
1049 */
1050VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1051{
1052 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
1053}
1054
1055
1056/**
1057 * Gets the host CPU microarchitecture.
1058 *
1059 * @returns CPU microarchitecture.
1060 * @param pVM The cross context VM structure.
1061 */
1062VMMDECL(CPUMMICROARCH) CPUMGetHostMicroarch(PCVM pVM)
1063{
1064 return pVM->cpum.s.HostFeatures.enmMicroarch;
1065}
1066
1067
1068/**
1069 * Gets the guest CPU vendor.
1070 *
1071 * @returns CPU vendor.
1072 * @param pVM The cross context VM structure.
1073 */
1074VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1075{
1076 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
1077}
1078
1079
1080/**
1081 * Gets the guest CPU microarchitecture.
1082 *
1083 * @returns CPU microarchitecture.
1084 * @param pVM The cross context VM structure.
1085 */
1086VMMDECL(CPUMMICROARCH) CPUMGetGuestMicroarch(PCVM pVM)
1087{
1088 return pVM->cpum.s.GuestFeatures.enmMicroarch;
1089}
1090
1091
1092/**
1093 * Gets the maximum number of physical and linear address bits supported by the
1094 * guest.
1095 *
1096 * @param pVM The cross context VM structure.
1097 * @param pcPhysAddrWidth Where to store the physical address width.
1098 * @param pcLinearAddrWidth Where to store the linear address width.
1099 */
1100VMMDECL(void) CPUMGetGuestAddrWidths(PCVM pVM, uint8_t *pcPhysAddrWidth, uint8_t *pcLinearAddrWidth)
1101{
1102 AssertPtr(pVM);
1103 AssertReturnVoid(pcPhysAddrWidth);
1104 AssertReturnVoid(pcLinearAddrWidth);
1105 *pcPhysAddrWidth = pVM->cpum.s.GuestFeatures.cMaxPhysAddrWidth;
1106 *pcLinearAddrWidth = pVM->cpum.s.GuestFeatures.cMaxLinearAddrWidth;
1107}
1108
1109
1110VMMDECL(int) CPUMSetGuestDR0(PVMCPUCC pVCpu, uint64_t uDr0)
1111{
1112 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1113 return CPUMRecalcHyperDRx(pVCpu, 0);
1114}
1115
1116
1117VMMDECL(int) CPUMSetGuestDR1(PVMCPUCC pVCpu, uint64_t uDr1)
1118{
1119 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1120 return CPUMRecalcHyperDRx(pVCpu, 1);
1121}
1122
1123
1124VMMDECL(int) CPUMSetGuestDR2(PVMCPUCC pVCpu, uint64_t uDr2)
1125{
1126 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1127 return CPUMRecalcHyperDRx(pVCpu, 2);
1128}
1129
1130
1131VMMDECL(int) CPUMSetGuestDR3(PVMCPUCC pVCpu, uint64_t uDr3)
1132{
1133 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1134 return CPUMRecalcHyperDRx(pVCpu, 3);
1135}
1136
1137
1138VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1139{
1140 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1141 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_DR6;
1142 return VINF_SUCCESS; /* No need to recalc. */
1143}
1144
1145
1146VMMDECL(int) CPUMSetGuestDR7(PVMCPUCC pVCpu, uint64_t uDr7)
1147{
1148 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1149 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_DR7;
1150 return CPUMRecalcHyperDRx(pVCpu, 7);
1151}
1152
1153
1154VMMDECL(int) CPUMSetGuestDRx(PVMCPUCC pVCpu, uint32_t iReg, uint64_t Value)
1155{
1156 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1157 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1158 if (iReg == 4 || iReg == 5)
1159 iReg += 2;
1160 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1161 return CPUMRecalcHyperDRx(pVCpu, iReg);
1162}
1163
1164
1165/**
1166 * Recalculates the hypervisor DRx register values based on current guest
1167 * registers and DBGF breakpoints, updating changed registers depending on the
1168 * context.
1169 *
1170 * This is called whenever a guest DRx register is modified (any context) and
1171 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
1172 *
 1173 * In raw-mode context this function will reload any (hyper) DRx registers that
 1174 * come out with a different value. It may also have to save the host debug
 1175 * registers if that hasn't been done already. In this context though, we'll
1176 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
1177 * are only important when breakpoints are actually enabled.
1178 *
1179 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
 1180 * reloaded by the HM code if it changes. Furthermore, we will only use the
1181 * combined register set when the VBox debugger is actually using hardware BPs,
1182 * when it isn't we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
1183 * concern us here).
1184 *
 1185 * In ring-3 we won't be loading anything, so we'll calculate hypervisor values
1186 * all the time.
1187 *
1188 * @returns VINF_SUCCESS.
1189 * @param pVCpu The cross context virtual CPU structure.
1190 * @param iGstReg The guest debug register number that was modified.
 1191 * UINT8_MAX if not a guest register.
1192 */
1193VMMDECL(int) CPUMRecalcHyperDRx(PVMCPUCC pVCpu, uint8_t iGstReg)
1194{
1195 PVM pVM = pVCpu->CTX_SUFF(pVM);
1196#ifndef IN_RING0
1197 RT_NOREF_PV(iGstReg);
1198#endif
1199
1200 /*
1201 * Compare the DR7s first.
1202 *
1203 * We only care about the enabled flags. GD is virtualized when we
 1204 * dispatch the #DB, we never enable it. The DBGF DR7 value will
1205 * always have the LE and GE bits set, so no need to check and disable
1206 * stuff if they're cleared like we have to for the guest DR7.
1207 */
1208 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1209 /** @todo This isn't correct. BPs work without setting LE and GE under AMD-V. They are also documented as unsupported by P6+. */
1210 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
1211 uGstDr7 = 0;
1212 else if (!(uGstDr7 & X86_DR7_LE))
1213 uGstDr7 &= ~X86_DR7_LE_ALL;
1214 else if (!(uGstDr7 & X86_DR7_GE))
1215 uGstDr7 &= ~X86_DR7_GE_ALL;
1216
1217 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1218 if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
1219 {
1220 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1221
1222 /*
1223 * Ok, something is enabled. Recalc each of the breakpoints, taking
 1224 * the VM debugger ones over the guest ones. In raw-mode context we will
1225 * not allow breakpoints with values inside the hypervisor area.
1226 */
1227 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
1228
1229 /* bp 0 */
1230 RTGCUINTREG uNewDr0;
1231 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1232 {
1233 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1234 uNewDr0 = DBGFBpGetDR0(pVM);
1235 }
1236 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1237 {
1238 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1239 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1240 }
1241 else
1242 uNewDr0 = 0;
1243
1244 /* bp 1 */
1245 RTGCUINTREG uNewDr1;
1246 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1247 {
1248 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1249 uNewDr1 = DBGFBpGetDR1(pVM);
1250 }
1251 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1252 {
1253 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1254 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1255 }
1256 else
1257 uNewDr1 = 0;
1258
1259 /* bp 2 */
1260 RTGCUINTREG uNewDr2;
1261 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1262 {
1263 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1264 uNewDr2 = DBGFBpGetDR2(pVM);
1265 }
1266 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1267 {
1268 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1269 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1270 }
1271 else
1272 uNewDr2 = 0;
1273
1274 /* bp 3 */
1275 RTGCUINTREG uNewDr3;
1276 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1277 {
1278 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1279 uNewDr3 = DBGFBpGetDR3(pVM);
1280 }
1281 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1282 {
1283 uNewDr3 = CPUMGetGuestDR3(pVCpu);
1284 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1285 }
1286 else
1287 uNewDr3 = 0;
1288
1289 /*
1290 * Apply the updates.
1291 */
1292 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
1293 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
1294 CPUMSetHyperDR3(pVCpu, uNewDr3);
1295 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
1296 CPUMSetHyperDR2(pVCpu, uNewDr2);
1297 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
1298 CPUMSetHyperDR1(pVCpu, uNewDr1);
1299 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
1300 CPUMSetHyperDR0(pVCpu, uNewDr0);
1301 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
1302 CPUMSetHyperDR7(pVCpu, uNewDr7);
1303 }
1304#ifdef IN_RING0
1305 else if (CPUMIsGuestDebugStateActive(pVCpu))
1306 {
1307 /*
1308 * Reload the register that was modified. Normally this won't happen
1309 * as we won't intercept DRx writes when not having the hyper debug
1310 * state loaded, but in case we do for some reason we'll simply deal
1311 * with it.
1312 */
1313 switch (iGstReg)
1314 {
1315 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
1316 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
1317 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
1318 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
1319 default:
1320 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
1321 }
1322 }
1323#endif
1324 else
1325 {
1326 /*
1327 * No active debug state any more. In raw-mode this means we have to
1328 * make sure DR7 has everything disabled now, if we armed it already.
1329 * In ring-0 we might end up here when just single stepping.
1330 */
1331#ifdef IN_RING0
1332 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
1333 {
1334 if (pVCpu->cpum.s.Hyper.dr[0])
1335 ASMSetDR0(0);
1336 if (pVCpu->cpum.s.Hyper.dr[1])
1337 ASMSetDR1(0);
1338 if (pVCpu->cpum.s.Hyper.dr[2])
1339 ASMSetDR2(0);
1340 if (pVCpu->cpum.s.Hyper.dr[3])
1341 ASMSetDR3(0);
1342 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
1343 }
1344#endif
1345 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
1346
1347 /* Clear all the registers. */
1348 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
1349 pVCpu->cpum.s.Hyper.dr[3] = 0;
1350 pVCpu->cpum.s.Hyper.dr[2] = 0;
1351 pVCpu->cpum.s.Hyper.dr[1] = 0;
1352 pVCpu->cpum.s.Hyper.dr[0] = 0;
1353
1354 }
1355 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1356 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
1357 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
1358 pVCpu->cpum.s.Hyper.dr[7]));
1359
1360 return VINF_SUCCESS;
1361}
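/*
 * Worked example of the DR7 merging above (all values symbolic): if DBGF has
 * armed hardware breakpoint 0 and the guest has armed breakpoint 1 in its own
 * DR7, the per-breakpoint blocks produce
 *
 *      uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK
 *              | (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK))
 *              | (uGstDr7  & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK));
 *
 * with hyper DR0 taken from DBGFBpGetDR0() and hyper DR1 taken from the guest
 * DR1, which is then applied through the CPUMSetHyperDRx setters.
 */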
1362
1363
1364/**
1365 * Set the guest XCR0 register.
1366 *
1367 * Will load additional state if the FPU state is already loaded (in ring-0 &
1368 * raw-mode context).
1369 *
1370 * @returns VINF_SUCCESS on success, VERR_CPUM_RAISE_GP_0 on invalid input
1371 * value.
1372 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1373 * @param uNewValue The new value.
1374 * @thread EMT(pVCpu)
1375 */
1376VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPUCC pVCpu, uint64_t uNewValue)
1377{
1378 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_XCRx);
1379 if ( (uNewValue & ~pVCpu->CTX_SUFF(pVM)->cpum.s.fXStateGuestMask) == 0
1380 /* The X87 bit cannot be cleared. */
1381 && (uNewValue & XSAVE_C_X87)
1382 /* AVX requires SSE. */
1383 && (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM
1384 /* AVX-512 requires YMM, SSE and all of its three components to be enabled. */
1385 && ( (uNewValue & (XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI)) == 0
1386 || (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI))
1387 == (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI) )
1388 )
1389 {
1390 pVCpu->cpum.s.Guest.aXcr[0] = uNewValue;
1391
1392 /* If more state components are enabled, we need to take care to load
 1393 them if the FPU/SSE state is already loaded. We may otherwise leak
 1394 host state to the guest. */
1395 uint64_t fNewComponents = ~pVCpu->cpum.s.Guest.fXStateMask & uNewValue;
1396 if (fNewComponents)
1397 {
1398#ifdef IN_RING0
1399 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
1400 {
1401 if (pVCpu->cpum.s.Guest.fXStateMask != 0)
1402 /* Adding more components. */
1403 ASMXRstor(&pVCpu->cpum.s.Guest.XState, fNewComponents);
1404 else
1405 {
1406 /* We're switching from FXSAVE/FXRSTOR to XSAVE/XRSTOR. */
1407 pVCpu->cpum.s.Guest.fXStateMask |= XSAVE_C_X87 | XSAVE_C_SSE;
1408 if (uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE))
1409 ASMXRstor(&pVCpu->cpum.s.Guest.XState, uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE));
1410 }
1411 }
1412#endif
1413 pVCpu->cpum.s.Guest.fXStateMask |= uNewValue;
1414 }
1415 return VINF_SUCCESS;
1416 }
1417 return VERR_CPUM_RAISE_GP_0;
1418}
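/*
 * A few concrete XCR0 values against the checks above, assuming the guest
 * XSAVE mask (fXStateGuestMask) permits all of the components mentioned:
 *
 *      CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87);                               // VINF_SUCCESS: x87 only
 *      CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM);   // VINF_SUCCESS: AVX state
 *      CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87 | XSAVE_C_YMM);                 // VERR_CPUM_RAISE_GP_0: AVX requires SSE
 *      CPUMSetGuestXcr0(pVCpu, XSAVE_C_SSE);                               // VERR_CPUM_RAISE_GP_0: the x87 bit is mandatory
 */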
1419
1420
1421/**
1422 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
1423 *
 1424 * @returns true if NXE is enabled, otherwise false.
1425 * @param pVCpu The cross context virtual CPU structure.
1426 */
1427VMMDECL(bool) CPUMIsGuestNXEnabled(PCVMCPU pVCpu)
1428{
1429 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
1430 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
1431}
1432
1433
1434/**
1435 * Tests if the guest has the Page Size Extension enabled (PSE).
1436 *
 1437 * @returns true if PSE (or PAE) is enabled, otherwise false.
1438 * @param pVCpu The cross context virtual CPU structure.
1439 */
1440VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PCVMCPU pVCpu)
1441{
1442 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
1443 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
1444 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
1445}
1446
1447
1448/**
 1449 * Tests if the guest has paging enabled (PG).
 1450 *
 1451 * @returns true if paging is enabled, otherwise false.
1452 * @param pVCpu The cross context virtual CPU structure.
1453 */
1454VMMDECL(bool) CPUMIsGuestPagingEnabled(PCVMCPU pVCpu)
1455{
1456 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1457 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
1458}
1459
1460
1461/**
 1462 * Tests if the guest has write protection of ring-0 page accesses enabled (WP).
 1463 *
 1464 * @returns true if WP is enabled, otherwise false.
1465 * @param pVCpu The cross context virtual CPU structure.
1466 */
1467VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PCVMCPU pVCpu)
1468{
1469 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1470 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
1471}
1472
1473
1474/**
1475 * Tests if the guest is running in real mode or not.
1476 *
1477 * @returns true if in real mode, otherwise false.
1478 * @param pVCpu The cross context virtual CPU structure.
1479 */
1480VMMDECL(bool) CPUMIsGuestInRealMode(PCVMCPU pVCpu)
1481{
1482 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1483 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1484}
1485
1486
1487/**
1488 * Tests if the guest is running in real or virtual 8086 mode.
1489 *
1490 * @returns @c true if it is, @c false if not.
1491 * @param pVCpu The cross context virtual CPU structure.
1492 */
1493VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PCVMCPU pVCpu)
1494{
1495 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS);
1496 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
1497 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
1498}
1499
1500
1501/**
 1502 * Tests if the guest is running in protected mode or not.
1503 *
1504 * @returns true if in protected mode, otherwise false.
1505 * @param pVCpu The cross context virtual CPU structure.
1506 */
1507VMMDECL(bool) CPUMIsGuestInProtectedMode(PCVMCPU pVCpu)
1508{
1509 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1510 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1511}
1512
1513
1514/**
 1515 * Tests if the guest is running in paged protected mode or not.
1516 *
1517 * @returns true if in paged protected mode, otherwise false.
1518 * @param pVCpu The cross context virtual CPU structure.
1519 */
1520VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PCVMCPU pVCpu)
1521{
1522 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1523 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1524}
1525
1526
1527/**
1528 * Tests if the guest is running in long mode or not.
1529 *
1530 * @returns true if in long mode, otherwise false.
1531 * @param pVCpu The cross context virtual CPU structure.
1532 */
1533VMMDECL(bool) CPUMIsGuestInLongMode(PCVMCPU pVCpu)
1534{
1535 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
1536 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1537}
1538
1539
1540/**
1541 * Tests if the guest is running in PAE mode or not.
1542 *
1543 * @returns true if in PAE mode, otherwise false.
1544 * @param pVCpu The cross context virtual CPU structure.
1545 */
1546VMMDECL(bool) CPUMIsGuestInPAEMode(PCVMCPU pVCpu)
1547{
1548 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
1549 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
1550 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
1551 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
1552 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
1553 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
1554}
1555
1556
1557/**
 1558 * Tests if the guest is running in 64-bit mode or not.
 1559 *
 1560 * @returns true if in 64-bit protected mode, otherwise false.
1561 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1562 */
1563VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
1564{
1565 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
1566 if (!CPUMIsGuestInLongMode(pVCpu))
1567 return false;
1568 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
1569 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
1570}
1571
1572
1573/**
1574 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
1575 * registers.
1576 *
 1577 * @returns true if in 64-bit protected mode, otherwise false.
1578 * @param pCtx Pointer to the current guest CPU context.
1579 */
1580VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
1581{
1582 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
1583}
1584
1585
1586/**
1587 * Sets the specified changed flags (CPUM_CHANGED_*).
1588 *
1589 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1590 * @param fChangedAdd The changed flags to add.
1591 */
1592VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd)
1593{
1594 pVCpu->cpum.s.fChanged |= fChangedAdd;
1595}
1596
1597
1598/**
1599 * Checks if the CPU supports the XSAVE and XRSTOR instruction.
1600 *
1601 * @returns true if supported.
1602 * @returns false if not supported.
1603 * @param pVM The cross context VM structure.
1604 */
1605VMMDECL(bool) CPUMSupportsXSave(PVM pVM)
1606{
1607 return pVM->cpum.s.HostFeatures.fXSaveRstor != 0;
1608}
1609
1610
1611/**
1612 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
1613 * @returns true if used.
1614 * @returns false if not used.
1615 * @param pVM The cross context VM structure.
1616 */
1617VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
1618{
1619 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
1620}
1621
1622
1623/**
1624 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
1625 * @returns true if used.
1626 * @returns false if not used.
1627 * @param pVM The cross context VM structure.
1628 */
1629VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
1630{
1631 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
1632}
1633
1634
1635/**
1636 * Checks if we activated the FPU/XMM state of the guest OS.
1637 *
1638 * Obsolete: This differs from CPUMIsGuestFPUStateLoaded() in that it refers to
1639 * the next time we'll be executing guest code, so it may return true for
1640 * 64-on-32 when we still haven't actually loaded the FPU status, just scheduled
1641 * it to be loaded the next time we go thru the world switcher
1642 * (CPUM_SYNC_FPU_STATE).
1643 *
1644 * @returns true / false.
1645 * @param pVCpu The cross context virtual CPU structure.
1646 */
1647VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
1648{
1649 bool fRet = RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
1650 AssertMsg(fRet == pVCpu->cpum.s.Guest.fUsedFpuGuest, ("fRet=%d\n", fRet));
1651 return fRet;
1652}
1653
1654
1655/**
1656 * Checks if we've really loaded the FPU/XMM state of the guest OS.
1657 *
1658 * @returns true / false.
1659 * @param pVCpu The cross context virtual CPU structure.
1660 */
1661VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu)
1662{
1663 bool fRet = RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
1664 AssertMsg(fRet == pVCpu->cpum.s.Guest.fUsedFpuGuest, ("fRet=%d\n", fRet));
1665 return fRet;
1666}
1667
1668
1669/**
1670 * Checks if we saved the FPU/XMM state of the host OS.
1671 *
1672 * @returns true / false.
1673 * @param pVCpu The cross context virtual CPU structure.
1674 */
1675VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu)
1676{
1677 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_HOST);
1678}
1679
1680
1681/**
1682 * Checks if the guest debug state is active.
1683 *
1684 * @returns boolean
1685 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1686 */
1687VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
1688{
1689 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
1690}
1691
1692
1693/**
1694 * Checks if the hyper debug state is active.
1695 *
1696 * @returns boolean
1697 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1698 */
1699VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
1700{
1701 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
1702}
1703
1704
1705/**
 1706 * Marks the guest's debug state as inactive.
 1707 *
1709 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1710 * @todo This API doesn't make sense any more.
1711 */
1712VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
1713{
1714 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
1715 NOREF(pVCpu);
1716}
1717
1718
1719/**
1720 * Get the current privilege level of the guest.
1721 *
1722 * @returns CPL
1723 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1724 */
1725VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
1726{
1727 /*
1728 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
1729 *
1730 * Note! We used to check CS.DPL here, assuming it was always equal to
1731 * CPL even if a conforming segment was loaded. But this turned out to
1732 * only apply to older AMD-V. With VT-x we had an ACP2 regression
1733 * during install after a far call to ring 2 with VT-x. Then on newer
1734 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
1735 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
1736 *
1737 * So, forget CS.DPL, always use SS.DPL.
1738 *
1739 * Note! The SS RPL is always equal to the CPL, while the CS RPL
1740 * isn't necessarily equal if the segment is conforming.
1741 * See section 4.11.1 in the AMD manual.
1742 *
1743 * Update: Where the heck does it say CS.RPL can differ from CPL other than
1744 * right after real->prot mode switch and when in V8086 mode? That
 1745 * section says the RPL specified in a direct transfer (call, jmp,
 1746 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
 1747 * it would be impossible for an exception handler or the iret
 1748 * instruction to figure out whether SS:ESP are part of the frame
 1749 * or not. A VBox or qemu bug must've led to this misconception.
1750 *
1751 * Update2: On an AMD bulldozer system here, I've no trouble loading a null
1752 * selector into SS with an RPL other than the CPL when CPL != 3 and
1753 * we're in 64-bit mode. The intel dev box doesn't allow this, on
1754 * RPL = CPL. Weird.
1755 */
1756 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
1757 uint32_t uCpl;
1758 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
1759 {
1760 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
1761 {
1762 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
1763 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
1764 else
1765 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
1766 }
1767 else
1768 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
1769 }
1770 else
1771 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
1772 return uCpl;
1773}
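/*
 * Summary of the derivation above: in protected mode the CPL comes from SS.DPL
 * (or the SS RPL when the hidden parts are not valid), V86 mode is always CPL 3
 * and real mode is always CPL 0.  Typical caller sketch:
 *
 *      bool const fRing0Guest = CPUMGetGuestCPL(pVCpu) == 0;
 */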
1774
1775
1776/**
1777 * Gets the current guest CPU mode.
1778 *
1779 * If paging mode is what you need, check out PGMGetGuestMode().
1780 *
1781 * @returns The CPU mode.
1782 * @param pVCpu The cross context virtual CPU structure.
1783 */
1784VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
1785{
1786 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
1787 CPUMMODE enmMode;
1788 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
1789 enmMode = CPUMMODE_REAL;
1790 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
1791 enmMode = CPUMMODE_PROTECTED;
1792 else
1793 enmMode = CPUMMODE_LONG;
1794
1795 return enmMode;
1796}
1797
1798
1799/**
1800 * Figure whether the CPU is currently executing 16, 32 or 64 bit code.
1801 *
1802 * @returns 16, 32 or 64.
1803 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1804 */
1805VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
1806{
1807 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);
1808
1809 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
1810 return 16;
1811
1812 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
1813 {
1814 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
1815 return 16;
1816 }
1817
1818 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
1819 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
1820 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
1821 return 64;
1822
1823 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
1824 return 32;
1825
1826 return 16;
1827}
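/*
 * Quick reference for the decision above: real mode and V86 mode are always
 * 16-bit, long mode with CS.L set is 64-bit, and otherwise CS.D selects 32-bit
 * (set) or 16-bit (clear) code.  Typical caller sketch:
 *
 *      uint32_t const cBits = CPUMGetGuestCodeBits(pVCpu);    // 16, 32 or 64
 *      bool const     fIs64 = cBits == 64;
 */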
1828
1829
1830VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
1831{
1832 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);
1833
1834 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
1835 return DISCPUMODE_16BIT;
1836
1837 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
1838 {
1839 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
1840 return DISCPUMODE_16BIT;
1841 }
1842
1843 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
1844 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
1845 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
1846 return DISCPUMODE_64BIT;
1847
1848 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
1849 return DISCPUMODE_32BIT;
1850
1851 return DISCPUMODE_16BIT;
1852}
1853
1854
1855/**
1856 * Gets the guest MXCSR_MASK value.
1857 *
1858 * This does not access the x87 state, but the value we determined at VM
1859 * initialization.
1860 *
1861 * @returns MXCSR mask.
1862 * @param pVM The cross context VM structure.
1863 */
1864VMMDECL(uint32_t) CPUMGetGuestMxCsrMask(PVM pVM)
1865{
1866 return pVM->cpum.s.GuestInfo.fMxCsrMask;
1867}
1868
1869
1870/**
1871 * Returns whether the guest has physical interrupts enabled.
1872 *
1873 * @returns @c true if interrupts are enabled, @c false otherwise.
1874 * @param pVCpu The cross context virtual CPU structure.
1875 *
1876 * @remarks Warning! This function does -not- take into account the global-interrupt
1877 * flag (GIF).
1878 */
1879VMM_INT_DECL(bool) CPUMIsGuestPhysIntrEnabled(PVMCPU pVCpu)
1880{
1881 switch (CPUMGetGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest))
1882 {
1883 case CPUMHWVIRT_NONE:
1884 default:
1885 return pVCpu->cpum.s.Guest.eflags.Bits.u1IF;
1886 case CPUMHWVIRT_VMX:
1887 return CPUMIsGuestVmxPhysIntrEnabled(&pVCpu->cpum.s.Guest);
1888 case CPUMHWVIRT_SVM:
1889 return CPUMIsGuestSvmPhysIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
1890 }
1891}
1892
1893
1894/**
1895 * Returns whether the nested-guest has virtual interrupts enabled.
1896 *
1897 * @returns @c true if interrupts are enabled, @c false otherwise.
1898 * @param pVCpu The cross context virtual CPU structure.
1899 *
1900 * @remarks Warning! This function does -not- take into account the global-interrupt
1901 * flag (GIF).
1902 */
1903VMM_INT_DECL(bool) CPUMIsGuestVirtIntrEnabled(PVMCPU pVCpu)
1904{
1905 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
1906 Assert(CPUMIsGuestInNestedHwvirtMode(pCtx));
1907
1908 if (CPUMIsGuestInVmxNonRootMode(pCtx))
1909 return CPUMIsGuestVmxVirtIntrEnabled(pCtx);
1910
1911 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
1912 return CPUMIsGuestSvmVirtIntrEnabled(pVCpu, pCtx);
1913}
1914
1915
1916/**
1917 * Calculates the interruptibility of the guest.
1918 *
1919 * @returns Interruptibility level.
1920 * @param pVCpu The cross context virtual CPU structure.
1921 */
1922VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu)
1923{
1924#if 1
1925 /* Global-interrupt flag blocks pretty much everything we care about here. */
1926 if (CPUMGetGuestGif(&pVCpu->cpum.s.Guest))
1927 {
1928 /*
1929 * Physical interrupts are primarily blocked using EFLAGS. However, we cannot access
1930 * it directly here. If and how EFLAGS are used depends on the context (nested-guest
1931 * or raw-mode). Hence we use the function below which handles the details.
1932 */
1933 if ( pVCpu->cpum.s.Guest.fInhibit == 0
1934 || ( !(pVCpu->cpum.s.Guest.fInhibit & CPUMCTX_INHIBIT_NMI)
1935 && pVCpu->cpum.s.Guest.uRipInhibitInt != pVCpu->cpum.s.Guest.rip))
1936 {
1937 /** @todo OPT: this next call should be inlined! */
1938 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
1939 {
1940 /** @todo OPT: type this out as it repeats tests. */
1941 if ( !CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest)
1942 || CPUMIsGuestVirtIntrEnabled(pVCpu))
1943 return CPUMINTERRUPTIBILITY_UNRESTRAINED;
1944
1945 /* Physical interrupts are enabled, but nested-guest virtual interrupts are disabled. */
1946 return CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED;
1947 }
1948 return CPUMINTERRUPTIBILITY_INT_DISABLED;
1949 }
1950
1951 /*
1952 * Blocking the delivery of NMIs during an interrupt shadow is CPU implementation
1953 * specific. Therefore, in practice, we can't deliver an NMI in an interrupt shadow.
1954 * However, there is some uncertainty regarding the converse, i.e. whether
1955 * NMI-blocking until IRET blocks delivery of physical interrupts.
1956 *
1957 * See Intel spec. 25.4.1 "Event Blocking".
1958 */
1959 /** @todo r=bird: The above comment mixes up VMX root-mode and non-root. Section
1960 * 25.4.1 is only applicable to VMX non-root mode. In root mode /
1961 * non-VMX mode, I have not seen any evidence in the intel manuals that
1962 * NMIs are not blocked when in an interrupt shadow. Section "6.7
1963 * NONMASKABLE INTERRUPT (NMI)" in SDM 3A seems pretty clear to me.
1964 */
1965 if (!(pVCpu->cpum.s.Guest.fInhibit & CPUMCTX_INHIBIT_NMI))
1966 return CPUMINTERRUPTIBILITY_INT_INHIBITED;
1967 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
1968 }
1969 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
1970#else
1971 if (pVCpu->cpum.s.Guest.rflags.Bits.u1IF)
1972 {
1973 if (pVCpu->cpum.s.Guest.hwvirt.fGif)
1974 {
1975 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS | VMCPU_FF_INHIBIT_INTERRUPTS))
1976 return CPUMINTERRUPTIBILITY_UNRESTRAINED;
1977
1978 /** @todo does blocking NMIs mean interrupts are also inhibited? */
1979 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1980 {
1981 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1982 return CPUMINTERRUPTIBILITY_INT_INHIBITED;
1983 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
1984 }
1985 AssertFailed();
1986 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
1987 }
1988 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
1989 }
1990 else
1991 {
1992 if (pVCpu->cpum.s.Guest.hwvirt.fGif)
1993 {
1994 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1995 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
1996 return CPUMINTERRUPTIBILITY_INT_DISABLED;
1997 }
1998 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
1999 }
2000#endif
2001}
2002
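/*
 * A standalone illustrative sketch (not part of this file) of the interruptibility
 * ladder above, reduced to plain booleans: GIF gates everything, then the
 * interrupt/NMI inhibit flags, then the (possibly nested-guest) interrupt-enable
 * state.  The RIP-match refinement of the interrupt shadow is deliberately left
 * out; all names here are hypothetical stand-ins for the CPUMCTX fields.
 */
#include <stdbool.h>

typedef enum EXAMPLEINTRLVL
{
    EXAMPLE_UNRESTRAINED, EXAMPLE_VIRT_INT_DISABLED, EXAMPLE_INT_DISABLED,
    EXAMPLE_INT_INHIBITED, EXAMPLE_NMI_INHIBIT, EXAMPLE_GLOBAL_INHIBIT
} EXAMPLEINTRLVL;

static EXAMPLEINTRLVL ExampleInterruptibility(bool fGif, bool fInhibitInt, bool fInhibitNmi,
                                              bool fPhysIntEnabled, bool fNestedVirtIntEnabled)
{
    if (!fGif)
        return EXAMPLE_GLOBAL_INHIBIT;
    if (!fInhibitInt && !fInhibitNmi)
    {
        if (!fPhysIntEnabled)
            return EXAMPLE_INT_DISABLED;
        return fNestedVirtIntEnabled ? EXAMPLE_UNRESTRAINED : EXAMPLE_VIRT_INT_DISABLED;
    }
    return fInhibitNmi ? EXAMPLE_NMI_INHIBIT : EXAMPLE_INT_INHIBITED;
}

int main(void)
{
    return ExampleInterruptibility(true, false, false, true, true) == EXAMPLE_UNRESTRAINED ? 0 : 1;
}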
2003
2004/**
2005 * Checks whether the SVM nested-guest has physical interrupts enabled.
2006 *
2007 * @returns true if interrupts are enabled, false otherwise.
2008 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2009 * @param pCtx The guest-CPU context.
2010 *
2011 * @remarks This does -not- take into account the global-interrupt flag.
2012 */
2013VMM_INT_DECL(bool) CPUMIsGuestSvmPhysIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2014{
2015 /** @todo Optimization: Avoid this function call and use a pointer to the
2016 * relevant eflags instead (setup during VMRUN instruction emulation). */
2017 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2018
2019 X86EFLAGS fEFlags;
2020 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))
2021 fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
2022 else
2023 fEFlags.u = pCtx->eflags.u;
2024
2025 return fEFlags.Bits.u1IF;
2026}
2027
2028
2029/**
2030 * Checks whether the SVM nested-guest is in a state to receive virtual interrupts
2031 * (set up for injection by the VMRUN instruction).
2032 *
2033 * @returns @c true if the nested-guest is ready to receive virtual interrupts,
2034 * @c false otherwise.
2035 *
2036 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2037 * @param pCtx The guest-CPU context.
2038 */
2039VMM_INT_DECL(bool) CPUMIsGuestSvmVirtIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2040{
2041 RT_NOREF(pVCpu);
2042 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2043
2044 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.Vmcb.ctrl;
2045 PCSVMINTCTRL pVmcbIntCtrl = &pVmcbCtrl->IntCtrl;
2046 Assert(!pVmcbIntCtrl->n.u1VGifEnable); /* We don't support passing virtual-GIF feature to the guest yet. */
2047 if ( !pVmcbIntCtrl->n.u1IgnoreTPR
2048 && pVmcbIntCtrl->n.u4VIntrPrio <= pVmcbIntCtrl->n.u8VTPR)
2049 return false;
2050
2051 return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
2052}
2053
2054
2055/**
2056 * Gets the pending SVM nested-guest interrupt vector.
2057 *
2058 * @returns The nested-guest interrupt to inject.
2059 * @param pCtx The guest-CPU context.
2060 */
2061VMM_INT_DECL(uint8_t) CPUMGetGuestSvmVirtIntrVector(PCCPUMCTX pCtx)
2062{
2063 return pCtx->hwvirt.svm.Vmcb.ctrl.IntCtrl.n.u8VIntrVector;
2064}
2065
2066
2067/**
2068 * Restores the host-state from the host-state save area as part of a \#VMEXIT.
2069 *
2070 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2071 * @param pCtx The guest-CPU context.
2072 */
2073VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PVMCPUCC pVCpu, PCPUMCTX pCtx)
2074{
2075 /*
2076 * Reload the guest's "host state".
2077 */
2078 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2079 pCtx->es = pHostState->es;
2080 pCtx->cs = pHostState->cs;
2081 pCtx->ss = pHostState->ss;
2082 pCtx->ds = pHostState->ds;
2083 pCtx->gdtr = pHostState->gdtr;
2084 pCtx->idtr = pHostState->idtr;
2085 CPUMSetGuestEferMsrNoChecks(pVCpu, pCtx->msrEFER, pHostState->uEferMsr);
2086 CPUMSetGuestCR0(pVCpu, pHostState->uCr0 | X86_CR0_PE);
2087 pCtx->cr3 = pHostState->uCr3;
2088 CPUMSetGuestCR4(pVCpu, pHostState->uCr4);
2089 pCtx->rflags.u = pHostState->rflags.u;
2090 pCtx->rflags.Bits.u1VM = 0;
2091 pCtx->rip = pHostState->uRip;
2092 pCtx->rsp = pHostState->uRsp;
2093 pCtx->rax = pHostState->uRax;
2094 pCtx->dr[7] &= ~(X86_DR7_ENABLED_MASK | X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
2095 pCtx->dr[7] |= X86_DR7_RA1_MASK;
2096 Assert(pCtx->ss.Attr.n.u2Dpl == 0);
2097
2098 /** @todo if RIP is not canonical or outside the CS segment limit, we need to
2099 * raise \#GP(0) in the guest. */
2100
2101 /** @todo check the loaded host-state for consistency. Figure out what
2102 * exactly this involves? */
2103}
2104
2105
2106/**
2107 * Saves the host-state to the host-state save area as part of a VMRUN.
2108 *
2109 * @param pCtx The guest-CPU context.
2110 * @param cbInstr The length of the VMRUN instruction in bytes.
2111 */
2112VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr)
2113{
2114 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2115 pHostState->es = pCtx->es;
2116 pHostState->cs = pCtx->cs;
2117 pHostState->ss = pCtx->ss;
2118 pHostState->ds = pCtx->ds;
2119 pHostState->gdtr = pCtx->gdtr;
2120 pHostState->idtr = pCtx->idtr;
2121 pHostState->uEferMsr = pCtx->msrEFER;
2122 pHostState->uCr0 = pCtx->cr0;
2123 pHostState->uCr3 = pCtx->cr3;
2124 pHostState->uCr4 = pCtx->cr4;
2125 pHostState->rflags.u = pCtx->rflags.u;
2126 pHostState->uRip = pCtx->rip + cbInstr;
2127 pHostState->uRsp = pCtx->rsp;
2128 pHostState->uRax = pCtx->rax;
2129}
2130
2131
2132/**
2133 * Applies the TSC offset of a nested-guest if any and returns the TSC value for the
2134 * nested-guest.
2135 *
2136 * @returns The TSC value after applying any nested-guest TSC offset.
2137 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2138 * @param uTscValue The guest TSC.
2139 *
2140 * @sa CPUMRemoveNestedGuestTscOffset.
2141 */
2142VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue)
2143{
2144 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2145 if (CPUMIsGuestInVmxNonRootMode(pCtx))
2146 {
2147 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
2148 return uTscValue + pCtx->hwvirt.vmx.Vmcs.u64TscOffset.u;
2149 return uTscValue;
2150 }
2151
2152 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
2153 {
2154 uint64_t offTsc;
2155 if (!HMGetGuestSvmTscOffset(pVCpu, &offTsc))
2156 offTsc = pCtx->hwvirt.svm.Vmcb.ctrl.u64TSCOffset;
2157 return uTscValue + offTsc;
2158 }
2159 return uTscValue;
2160}
2161
2162
2163/**
2164 * Removes the TSC offset of a nested-guest if any and returns the TSC value for the
2165 * guest.
2166 *
2167 * @returns The TSC value after removing any nested-guest TSC offset.
2168 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2169 * @param uTscValue The nested-guest TSC.
2170 *
2171 * @sa CPUMApplyNestedGuestTscOffset.
2172 */
2173VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue)
2174{
2175 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2176 if (CPUMIsGuestInVmxNonRootMode(pCtx))
2177 {
2178 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
2179 return uTscValue - pCtx->hwvirt.vmx.Vmcs.u64TscOffset.u;
2180 return uTscValue;
2181 }
2182
2183 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
2184 {
2185 uint64_t offTsc;
2186 if (!HMGetGuestSvmTscOffset(pVCpu, &offTsc))
2187 offTsc = pCtx->hwvirt.svm.Vmcb.ctrl.u64TSCOffset;
2188 return uTscValue - offTsc;
2189 }
2190 return uTscValue;
2191}
2192
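/*
 * A standalone illustrative sketch (not part of this file) showing that the two
 * helpers above are inverses when the same offset is in effect: applying and then
 * removing a nested-guest TSC offset returns the original value.  Unsigned 64-bit
 * wrap-around is well defined in C, which is what makes the subtraction exact even
 * for "negative" offsets.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t ExampleApplyTscOffset(uint64_t uTsc, uint64_t offTsc)  { return uTsc + offTsc; }
static uint64_t ExampleRemoveTscOffset(uint64_t uTsc, uint64_t offTsc) { return uTsc - offTsc; }

int main(void)
{
    uint64_t const uGuestTsc = UINT64_C(0x123456789abcdef0);
    uint64_t const offTsc    = UINT64_C(0xfffffffffff00000); /* A negative offset in two's complement. */
    uint64_t const uNested   = ExampleApplyTscOffset(uGuestTsc, offTsc);
    assert(ExampleRemoveTscOffset(uNested, offTsc) == uGuestTsc);
    return 0;
}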
2193
2194/**
2195 * Used to dynamically import state residing in NEM or HM.
2196 *
2197 * This is a worker for the CPUM_IMPORT_EXTRN_RET() macro and various IEM ones.
2198 *
2199 * @returns VBox status code.
2200 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2201 * @param fExtrnImport The fields to import.
2202 * @thread EMT(pVCpu)
2203 */
2204VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPUCC pVCpu, uint64_t fExtrnImport)
2205{
2206 VMCPU_ASSERT_EMT(pVCpu);
2207 if (pVCpu->cpum.s.Guest.fExtrn & fExtrnImport)
2208 {
2209 switch (pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_KEEPER_MASK)
2210 {
2211 case CPUMCTX_EXTRN_KEEPER_NEM:
2212 {
2213 int rc = NEMImportStateOnDemand(pVCpu, fExtrnImport);
2214 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
2215 return rc;
2216 }
2217
2218 case CPUMCTX_EXTRN_KEEPER_HM:
2219 {
2220#ifdef IN_RING0
2221 int rc = HMR0ImportStateOnDemand(pVCpu, fExtrnImport);
2222 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
2223 return rc;
2224#else
2225 AssertLogRelMsgFailed(("TODO Fetch HM state: %#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport));
2226 return VINF_SUCCESS;
2227#endif
2228 }
2229 default:
2230 AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
2231 }
2232 }
2233 return VINF_SUCCESS;
2234}
2235
2236
2237/**
2238 * Gets valid CR4 bits for the guest.
2239 *
2240 * @returns Valid CR4 bits.
2241 * @param pVM The cross context VM structure.
2242 */
2243VMM_INT_DECL(uint64_t) CPUMGetGuestCR4ValidMask(PVM pVM)
2244{
2245 PCCPUMFEATURES pGuestFeatures = &pVM->cpum.s.GuestFeatures;
2246 uint64_t fMask = X86_CR4_VME | X86_CR4_PVI
2247 | X86_CR4_TSD | X86_CR4_DE
2248 | X86_CR4_MCE | X86_CR4_PCE;
2249 if (pGuestFeatures->fPae)
2250 fMask |= X86_CR4_PAE;
2251 if (pGuestFeatures->fPge)
2252 fMask |= X86_CR4_PGE;
2253 if (pGuestFeatures->fPse)
2254 fMask |= X86_CR4_PSE;
2255 if (pGuestFeatures->fFxSaveRstor)
2256 fMask |= X86_CR4_OSFXSR;
2257 if (pGuestFeatures->fVmx)
2258 fMask |= X86_CR4_VMXE;
2259 if (pGuestFeatures->fXSaveRstor)
2260 fMask |= X86_CR4_OSXSAVE;
2261 if (pGuestFeatures->fPcid)
2262 fMask |= X86_CR4_PCIDE;
2263 if (pGuestFeatures->fFsGsBase)
2264 fMask |= X86_CR4_FSGSBASE;
2265 if (pGuestFeatures->fSse)
2266 fMask |= X86_CR4_OSXMMEEXCPT;
2267 return fMask;
2268}
2269
2270
2271/**
2272 * Sets the PAE PDPEs for the guest.
2273 *
2274 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2275 * @param paPaePdpes The PAE PDPEs to set.
2276 */
2277VMM_INT_DECL(void) CPUMSetGuestPaePdpes(PVMCPU pVCpu, PCX86PDPE paPaePdpes)
2278{
2279 Assert(paPaePdpes);
2280 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->cpum.s.Guest.aPaePdpes); i++)
2281 pVCpu->cpum.s.Guest.aPaePdpes[i].u = paPaePdpes[i].u;
2282 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR3;
2283}
2284
2285
2286/**
2287 * Gets the PAE PDPEs for the guest.
2288 *
2289 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2290 * @param paPaePdpes Where to store the PAE PDPEs.
2291 */
2292VMM_INT_DECL(void) CPUMGetGuestPaePdpes(PVMCPU pVCpu, PX86PDPE paPaePdpes)
2293{
2294 Assert(paPaePdpes);
2295 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
2296 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->cpum.s.Guest.aPaePdpes); i++)
2297 paPaePdpes[i].u = pVCpu->cpum.s.Guest.aPaePdpes[i].u;
2298}
2299
2300
2301/**
2302 * Starts a VMX-preemption timer to expire as specified by the nested hypervisor.
2303 *
2304 * @returns VBox status code.
2305 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2306 * @param uTimer The VMCS preemption timer value.
2307 * @param cShift The VMX-preemption timer shift (usually based on guest
2308 * VMX MSR rate).
2309 * @param pu64EntryTick Where to store the current tick when the timer is
2310 * programmed.
2311 * @thread EMT(pVCpu)
2312 */
2313VMM_INT_DECL(int) CPUMStartGuestVmxPremptTimer(PVMCPUCC pVCpu, uint32_t uTimer, uint8_t cShift, uint64_t *pu64EntryTick)
2314{
2315 Assert(uTimer);
2316 Assert(cShift <= 31);
2317 Assert(pu64EntryTick);
2318 VMCPU_ASSERT_EMT(pVCpu);
2319 uint64_t const cTicksToNext = uTimer << cShift;
2320 return TMTimerSetRelative(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.s.hNestedVmxPreemptTimer, cTicksToNext, pu64EntryTick);
2321}
2322
2323
2324/**
2325 * Stops the VMX-preemption timer from firing.
2326 *
2327 * @returns VBox status code.
2328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2329 * @thread EMT.
2330 *
2331 * @remarks This can be called during VM reset, so we cannot assume it will be on
2332 * the EMT corresponding to @c pVCpu.
2333 */
2334VMM_INT_DECL(int) CPUMStopGuestVmxPremptTimer(PVMCPUCC pVCpu)
2335{
2336 /*
2337 * CPUM gets initialized before TM, so we defer creation of timers till CPUMR3InitCompleted().
2338 * However, we still get called during CPUMR3Init() and hence we need to check if we have
2339 * a valid timer object before trying to stop it.
2340 */
2341 int rc;
2342 TMTIMERHANDLE hTimer = pVCpu->cpum.s.hNestedVmxPreemptTimer;
2343 if (hTimer != NIL_TMTIMERHANDLE)
2344 {
2345 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2346 rc = TMTimerLock(pVM, hTimer, VERR_IGNORED);
2347 if (rc == VINF_SUCCESS)
2348 {
2349 if (TMTimerIsActive(pVM, hTimer))
2350 TMTimerStop(pVM, hTimer);
2351 TMTimerUnlock(pVM, hTimer);
2352 }
2353 }
2354 else
2355 rc = VERR_NOT_FOUND;
2356 return rc;
2357}
2358
2359
2360/**
2361 * Gets the read and write permission bits for an MSR in an MSR bitmap.
2362 *
2363 * @returns VMXMSRPM_XXX - the MSR permission.
2364 * @param pvMsrBitmap Pointer to the MSR bitmap.
2365 * @param idMsr The MSR to get permissions for.
2366 *
2367 * @sa hmR0VmxSetMsrPermission.
2368 */
2369VMM_INT_DECL(uint32_t) CPUMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr)
2370{
2371 AssertPtrReturn(pvMsrBitmap, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR);
2372
2373 uint8_t const * const pbMsrBitmap = (uint8_t const * const)pvMsrBitmap;
2374
2375 /*
2376 * MSR Layout:
2377 * Byte index MSR range Interpreted as
2378 * 0x000 - 0x3ff 0x00000000 - 0x00001fff Low MSR read bits.
2379 * 0x400 - 0x7ff 0xc0000000 - 0xc0001fff High MSR read bits.
2380 * 0x800 - 0xbff 0x00000000 - 0x00001fff Low MSR write bits.
2381 * 0xc00 - 0xfff 0xc0000000 - 0xc0001fff High MSR write bits.
2382 *
2383 * A bit corresponding to an MSR within the above range causes a VM-exit
2384 * if the bit is 1 on executions of RDMSR/WRMSR. If an MSR falls outside
2385 * these ranges, accessing it always causes a VM-exit.
2386 *
2387 * See Intel spec. 24.6.9 "MSR-Bitmap Address".
2388 */
2389 uint32_t const offBitmapRead = 0;
2390 uint32_t const offBitmapWrite = 0x800;
2391 uint32_t offMsr;
2392 uint32_t iBit;
2393 if (idMsr <= UINT32_C(0x00001fff))
2394 {
2395 offMsr = 0;
2396 iBit = idMsr;
2397 }
2398 else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
2399 {
2400 offMsr = 0x400;
2401 iBit = idMsr - UINT32_C(0xc0000000);
2402 }
2403 else
2404 {
2405 LogFunc(("Warning! Out of range MSR %#RX32\n", idMsr));
2406 return VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR;
2407 }
2408
2409 /*
2410 * Get the MSR read permissions.
2411 */
2412 uint32_t fRet;
2413 uint32_t const offMsrRead = offBitmapRead + offMsr;
2414 Assert(offMsrRead + (iBit >> 3) < offBitmapWrite);
2415 if (ASMBitTest(pbMsrBitmap, (offMsrRead << 3) + iBit))
2416 fRet = VMXMSRPM_EXIT_RD;
2417 else
2418 fRet = VMXMSRPM_ALLOW_RD;
2419
2420 /*
2421 * Get the MSR write permissions.
2422 */
2423 uint32_t const offMsrWrite = offBitmapWrite + offMsr;
2424 Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE);
2425 if (ASMBitTest(pbMsrBitmap, (offMsrWrite << 3) + iBit))
2426 fRet |= VMXMSRPM_EXIT_WR;
2427 else
2428 fRet |= VMXMSRPM_ALLOW_WR;
2429
2430 Assert(VMXMSRPM_IS_FLAG_VALID(fRet));
2431 return fRet;
2432}
2433
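/*
 * A standalone illustrative sketch (not part of this file) of the MSR-bitmap
 * layout described in CPUMGetVmxMsrPermission above: for a given MSR it computes
 * the byte offsets of its read and write permission bits plus the bit position
 * within those bytes.  ExampleVmxMsrBitmapPos is a hypothetical helper; it returns
 * 0 on success and -1 for MSRs outside the two architected ranges (which always
 * cause a VM-exit).
 */
#include <stdint.h>
#include <stdio.h>

static int ExampleVmxMsrBitmapPos(uint32_t idMsr, uint32_t *poffReadByte, uint32_t *poffWriteByte, uint32_t *piBit)
{
    uint32_t offMsr;
    uint32_t iBit;
    if (idMsr <= UINT32_C(0x00001fff))
    {
        offMsr = 0x000;                                   /* Low MSR range. */
        iBit   = idMsr;
    }
    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
    {
        offMsr = 0x400;                                   /* High MSR range. */
        iBit   = idMsr - UINT32_C(0xc0000000);
    }
    else
        return -1;                                        /* Out-of-range MSRs always exit. */
    *poffReadByte  = 0x000 + offMsr + (iBit >> 3);        /* Read bits come first. */
    *poffWriteByte = 0x800 + offMsr + (iBit >> 3);        /* Write bits start at 0x800. */
    *piBit         = iBit & 7;
    return 0;
}

int main(void)
{
    uint32_t offRd, offWr, iBit;
    if (ExampleVmxMsrBitmapPos(UINT32_C(0xc0000080) /* IA32_EFER */, &offRd, &offWr, &iBit) == 0)
        printf("read byte %#x, write byte %#x, bit %u\n", (unsigned)offRd, (unsigned)offWr, (unsigned)iBit);
    return 0;
}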
2434
2435/**
2436 * Checks the permission bits for the specified I/O port from the given I/O bitmap
2437 * to see if it causes a VM-exit.
2438 *
2439 * @returns @c true if the I/O port access must cause a VM-exit, @c false otherwise.
2440 * @param pbIoBitmap Pointer to I/O bitmap.
2441 * @param uPort The I/O port being accessed.
2442 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
2443 */
2444static bool cpumGetVmxIoBitmapPermission(uint8_t const *pbIoBitmap, uint16_t uPort, uint8_t cbAccess)
2445{
2446 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
2447
2448 /*
2449 * If the I/O port access wraps around the 16-bit port I/O space, we must cause a
2450 * VM-exit.
2451 *
2452 * Reading 1, 2 or 4 bytes at ports 0xffff, 0xfffe and 0xfffc respectively is
2453 * valid and does not constitute a wrap around. However, reading 2 bytes at port
2454 * 0xffff, or 4 bytes at ports 0xffff/0xfffe/0xfffd, does wrap around. In other
2455 * words, any access that touches both port 0xffff and port 0 is a wrap around.
2456 *
2457 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2458 */
2459 uint32_t const uPortLast = uPort + cbAccess;
2460 if (uPortLast > 0x10000)
2461 return true;
2462
2463 /*
2464 * If any bit corresponding to the I/O access is set, we must cause a VM-exit.
2465 */
2466 uint16_t const offPerm = uPort >> 3; /* Byte offset of the port. */
2467 uint16_t const idxPermBit = uPort - (offPerm << 3); /* Bit offset within byte. */
2468 Assert(idxPermBit < 8);
2469 static const uint8_t s_afMask[] = { 0x0, 0x1, 0x3, 0x7, 0xf }; /* Bit-mask for all access sizes. */
2470 uint16_t const fMask = s_afMask[cbAccess] << idxPermBit; /* Bit-mask of the access. */
2471
2472 /* Fetch 8 or 16-bits depending on whether the access spans 8-bit boundary. */
2473 RTUINT16U uPerm;
2474 uPerm.s.Lo = pbIoBitmap[offPerm];
2475 if (idxPermBit + cbAccess > 8)
2476 uPerm.s.Hi = pbIoBitmap[offPerm + 1];
2477 else
2478 uPerm.s.Hi = 0;
2479
2480 /* If any bit for the access is 1, we must cause a VM-exit. */
2481 if (uPerm.u & fMask)
2482 return true;
2483
2484 return false;
2485}
2486
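/*
 * A standalone illustrative sketch (not part of this file) of the wrap-around rule
 * used by cpumGetVmxIoBitmapPermission above: an access of cbAccess bytes starting
 * at uPort wraps the 16-bit I/O space exactly when uPort + cbAccess exceeds
 * 0x10000, and such accesses must always cause a VM-exit.  ExampleIoAccessWraps is
 * a hypothetical helper.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool ExampleIoAccessWraps(uint16_t uPort, uint8_t cbAccess)
{
    return (uint32_t)uPort + cbAccess > UINT32_C(0x10000);
}

int main(void)
{
    printf("1 byte  at 0xffff wraps: %d\n", ExampleIoAccessWraps(0xffff, 1)); /* 0 */
    printf("2 bytes at 0xffff wraps: %d\n", ExampleIoAccessWraps(0xffff, 2)); /* 1 */
    printf("4 bytes at 0xfffc wraps: %d\n", ExampleIoAccessWraps(0xfffc, 4)); /* 0 */
    printf("4 bytes at 0xfffd wraps: %d\n", ExampleIoAccessWraps(0xfffd, 4)); /* 1 */
    return 0;
}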
2487
2488/**
2489 * Returns whether the given VMCS field is valid and supported for the guest.
2490 *
2491 * @param pVM The cross context VM structure.
2492 * @param u64VmcsField The VMCS field.
2493 *
2494 * @remarks This takes into account the CPU features exposed to the guest.
2495 */
2496VMM_INT_DECL(bool) CPUMIsGuestVmxVmcsFieldValid(PVMCC pVM, uint64_t u64VmcsField)
2497{
2498 uint32_t const uFieldEncHi = RT_HI_U32(u64VmcsField);
2499 uint32_t const uFieldEncLo = RT_LO_U32(u64VmcsField);
2500 if (!uFieldEncHi)
2501 { /* likely */ }
2502 else
2503 return false;
2504
2505 PCCPUMFEATURES pFeat = &pVM->cpum.s.GuestFeatures;
2506 switch (uFieldEncLo)
2507 {
2508 /*
2509 * 16-bit fields.
2510 */
2511 /* Control fields. */
2512 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
2513 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
2514 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
2515
2516 /* Guest-state fields. */
2517 case VMX_VMCS16_GUEST_ES_SEL:
2518 case VMX_VMCS16_GUEST_CS_SEL:
2519 case VMX_VMCS16_GUEST_SS_SEL:
2520 case VMX_VMCS16_GUEST_DS_SEL:
2521 case VMX_VMCS16_GUEST_FS_SEL:
2522 case VMX_VMCS16_GUEST_GS_SEL:
2523 case VMX_VMCS16_GUEST_LDTR_SEL:
2524 case VMX_VMCS16_GUEST_TR_SEL: return true;
2525 case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
2526 case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
2527
2528 /* Host-state fields. */
2529 case VMX_VMCS16_HOST_ES_SEL:
2530 case VMX_VMCS16_HOST_CS_SEL:
2531 case VMX_VMCS16_HOST_SS_SEL:
2532 case VMX_VMCS16_HOST_DS_SEL:
2533 case VMX_VMCS16_HOST_FS_SEL:
2534 case VMX_VMCS16_HOST_GS_SEL:
2535 case VMX_VMCS16_HOST_TR_SEL: return true;
2536
2537 /*
2538 * 64-bit fields.
2539 */
2540 /* Control fields. */
2541 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
2542 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
2543 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
2544 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
2545 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
2546 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
2547 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
2548 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
2549 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
2550 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
2551 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
2552 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
2553 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
2554 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
2555 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
2556 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
2557 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
2558 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
2559 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
2560 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
2561 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
2562 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
2563 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
2564 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
2565 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
2566 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
2567 case VMX_VMCS64_CTRL_EPTP_FULL:
2568 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
2569 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
2570 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
2571 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
2572 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
2573 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
2574 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
2575 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
2576 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
2577 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
2578 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
2579 {
2580 PCVMCPU pVCpu = pVM->CTX_SUFF(apCpus)[0];
2581 uint64_t const uVmFuncMsr = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64VmFunc;
2582 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
2583 }
2584 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
2585 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
2586 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
2587 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
2588 case VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL:
2589 case VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
2590 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
2591 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
2592 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
2593 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
2594 case VMX_VMCS64_CTRL_PROC_EXEC3_FULL:
2595 case VMX_VMCS64_CTRL_PROC_EXEC3_HIGH: return pFeat->fVmxTertiaryExecCtls;
2596
2597 /* Read-only data fields. */
2598 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
2599 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
2600
2601 /* Guest-state fields. */
2602 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
2603 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
2604 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
2605 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
2606 case VMX_VMCS64_GUEST_PAT_FULL:
2607 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
2608 case VMX_VMCS64_GUEST_EFER_FULL:
2609 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
2610 case VMX_VMCS64_GUEST_PDPTE0_FULL:
2611 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
2612 case VMX_VMCS64_GUEST_PDPTE1_FULL:
2613 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
2614 case VMX_VMCS64_GUEST_PDPTE2_FULL:
2615 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
2616 case VMX_VMCS64_GUEST_PDPTE3_FULL:
2617 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
2618
2619 /* Host-state fields. */
2620 case VMX_VMCS64_HOST_PAT_FULL:
2621 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
2622 case VMX_VMCS64_HOST_EFER_FULL:
2623 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
2624
2625 /*
2626 * 32-bit fields.
2627 */
2628 /* Control fields. */
2629 case VMX_VMCS32_CTRL_PIN_EXEC:
2630 case VMX_VMCS32_CTRL_PROC_EXEC:
2631 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
2632 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
2633 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
2634 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
2635 case VMX_VMCS32_CTRL_EXIT:
2636 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
2637 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
2638 case VMX_VMCS32_CTRL_ENTRY:
2639 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
2640 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
2641 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
2642 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
2643 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
2644 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
2645 case VMX_VMCS32_CTRL_PLE_GAP:
2646 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
2647
2648 /* Read-only data fields. */
2649 case VMX_VMCS32_RO_VM_INSTR_ERROR:
2650 case VMX_VMCS32_RO_EXIT_REASON:
2651 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
2652 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
2653 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
2654 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
2655 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
2656 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
2657
2658 /* Guest-state fields. */
2659 case VMX_VMCS32_GUEST_ES_LIMIT:
2660 case VMX_VMCS32_GUEST_CS_LIMIT:
2661 case VMX_VMCS32_GUEST_SS_LIMIT:
2662 case VMX_VMCS32_GUEST_DS_LIMIT:
2663 case VMX_VMCS32_GUEST_FS_LIMIT:
2664 case VMX_VMCS32_GUEST_GS_LIMIT:
2665 case VMX_VMCS32_GUEST_LDTR_LIMIT:
2666 case VMX_VMCS32_GUEST_TR_LIMIT:
2667 case VMX_VMCS32_GUEST_GDTR_LIMIT:
2668 case VMX_VMCS32_GUEST_IDTR_LIMIT:
2669 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
2670 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
2671 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
2672 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
2673 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
2674 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
2675 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
2676 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
2677 case VMX_VMCS32_GUEST_INT_STATE:
2678 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
2679 case VMX_VMCS32_GUEST_SMBASE:
2680 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
2681 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
2682
2683 /* Host-state fields. */
2684 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
2685
2686 /*
2687 * Natural-width fields.
2688 */
2689 /* Control fields. */
2690 case VMX_VMCS_CTRL_CR0_MASK:
2691 case VMX_VMCS_CTRL_CR4_MASK:
2692 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
2693 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
2694 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
2695 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
2696 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
2697 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
2698
2699 /* Read-only data fields. */
2700 case VMX_VMCS_RO_EXIT_QUALIFICATION:
2701 case VMX_VMCS_RO_IO_RCX:
2702 case VMX_VMCS_RO_IO_RSI:
2703 case VMX_VMCS_RO_IO_RDI:
2704 case VMX_VMCS_RO_IO_RIP:
2705 case VMX_VMCS_RO_GUEST_LINEAR_ADDR: return true;
2706
2707 /* Guest-state fields. */
2708 case VMX_VMCS_GUEST_CR0:
2709 case VMX_VMCS_GUEST_CR3:
2710 case VMX_VMCS_GUEST_CR4:
2711 case VMX_VMCS_GUEST_ES_BASE:
2712 case VMX_VMCS_GUEST_CS_BASE:
2713 case VMX_VMCS_GUEST_SS_BASE:
2714 case VMX_VMCS_GUEST_DS_BASE:
2715 case VMX_VMCS_GUEST_FS_BASE:
2716 case VMX_VMCS_GUEST_GS_BASE:
2717 case VMX_VMCS_GUEST_LDTR_BASE:
2718 case VMX_VMCS_GUEST_TR_BASE:
2719 case VMX_VMCS_GUEST_GDTR_BASE:
2720 case VMX_VMCS_GUEST_IDTR_BASE:
2721 case VMX_VMCS_GUEST_DR7:
2722 case VMX_VMCS_GUEST_RSP:
2723 case VMX_VMCS_GUEST_RIP:
2724 case VMX_VMCS_GUEST_RFLAGS:
2725 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
2726 case VMX_VMCS_GUEST_SYSENTER_ESP:
2727 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
2728
2729 /* Host-state fields. */
2730 case VMX_VMCS_HOST_CR0:
2731 case VMX_VMCS_HOST_CR3:
2732 case VMX_VMCS_HOST_CR4:
2733 case VMX_VMCS_HOST_FS_BASE:
2734 case VMX_VMCS_HOST_GS_BASE:
2735 case VMX_VMCS_HOST_TR_BASE:
2736 case VMX_VMCS_HOST_GDTR_BASE:
2737 case VMX_VMCS_HOST_IDTR_BASE:
2738 case VMX_VMCS_HOST_SYSENTER_ESP:
2739 case VMX_VMCS_HOST_SYSENTER_EIP:
2740 case VMX_VMCS_HOST_RSP:
2741 case VMX_VMCS_HOST_RIP: return true;
2742 }
2743
2744 return false;
2745}
2746
2747
2748/**
2749 * Checks whether the given I/O access should cause a nested-guest VM-exit.
2750 *
2751 * @returns @c true if it causes a VM-exit, @c false otherwise.
2752 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2753 * @param u16Port The I/O port being accessed.
2754 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
2755 */
2756VMM_INT_DECL(bool) CPUMIsGuestVmxIoInterceptSet(PCVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess)
2757{
2758 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2759 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_UNCOND_IO_EXIT))
2760 return true;
2761
2762 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_IO_BITMAPS))
2763 return cpumGetVmxIoBitmapPermission(pCtx->hwvirt.vmx.abIoBitmap, u16Port, cbAccess);
2764
2765 return false;
2766}
2767
2768
2769/**
2770 * Checks whether the Mov-to-CR3 instruction causes a nested-guest VM-exit.
2771 *
2772 * @returns @c true if it causes a VM-exit, @c false otherwise.
2773 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2774 * @param uNewCr3 The CR3 value being written.
2775 */
2776VMM_INT_DECL(bool) CPUMIsGuestVmxMovToCr3InterceptSet(PVMCPU pVCpu, uint64_t uNewCr3)
2777{
2778 /*
2779 * If the CR3-load exiting control is set and the new CR3 value does not
2780 * match any of the CR3-target values in the VMCS, we must cause a VM-exit.
2781 *
2782 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2783 */
2784 PCCPUMCTX const pCtx = &pVCpu->cpum.s.Guest;
2785 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_CR3_LOAD_EXIT))
2786 {
2787 uint32_t const uCr3TargetCount = pCtx->hwvirt.vmx.Vmcs.u32Cr3TargetCount;
2788 Assert(uCr3TargetCount <= VMX_V_CR3_TARGET_COUNT);
2789
2790 /* If the CR3-target count is 0, cause a VM-exit. */
2791 if (uCr3TargetCount == 0)
2792 return true;
2793
2794 /* If the CR3 being written doesn't match any of the target values, cause a VM-exit. */
2795 AssertCompile(VMX_V_CR3_TARGET_COUNT == 4);
2796 if ( uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target0.u
2797 && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target1.u
2798 && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target2.u
2799 && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target3.u)
2800 return true;
2801 }
2802 return false;
2803}
2804
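/*
 * A standalone illustrative sketch (not part of this file) of the CR3-target test
 * above: with CR3-load exiting enabled, a MOV to CR3 avoids a VM-exit only when
 * the new value matches one of the first cTargets CR3-target values (at most
 * four).  ExampleMovToCr3Exits is a hypothetical helper following the architected
 * rule.
 */
#include <stdbool.h>
#include <stdint.h>

static bool ExampleMovToCr3Exits(uint64_t uNewCr3, uint64_t const auCr3Targets[4], uint32_t cTargets)
{
    if (cTargets > 4)
        cTargets = 4;
    for (uint32_t i = 0; i < cTargets; i++)
        if (auCr3Targets[i] == uNewCr3)
            return false;       /* Matches a target value: no VM-exit. */
    return true;                /* Zero targets or no match: VM-exit. */
}

int main(void)
{
    uint64_t const auTargets[4] = { 0x1000, 0x2000, 0, 0 };
    return ExampleMovToCr3Exits(0x2000, auTargets, 2) ? 1 : 0;  /* Matches target 1: exits with 0. */
}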
2805
2806/**
2807 * Checks whether a VMREAD or VMWRITE instruction for the given VMCS field causes a
2808 * VM-exit or not.
2809 *
2810 * @returns @c true if the VMREAD/VMWRITE is intercepted, @c false otherwise.
2811 * @param pVCpu The cross context virtual CPU structure.
2812 * @param uExitReason The VM-exit reason (VMX_EXIT_VMREAD or
2813 * VMX_EXIT_VMWRITE).
2814 * @param u64VmcsField The VMCS field.
2815 */
2816VMM_INT_DECL(bool) CPUMIsGuestVmxVmreadVmwriteInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint64_t u64VmcsField)
2817{
2818 Assert(CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest));
2819 Assert( uExitReason == VMX_EXIT_VMREAD
2820 || uExitReason == VMX_EXIT_VMWRITE);
2821
2822 /*
2823 * Without VMCS shadowing, all VMREAD and VMWRITE instructions are intercepted.
2824 */
2825 if (!CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.s.Guest, VMX_PROC_CTLS2_VMCS_SHADOWING))
2826 return true;
2827
2828 /*
2829 * If any reserved bit in the 64-bit VMCS field encoding is set, the VMREAD/VMWRITE
2830 * is intercepted. This excludes any reserved bits in the valid parts of the field
2831 * encoding (i.e. bit 12).
2832 */
2833 if (u64VmcsField & VMX_VMCSFIELD_RSVD_MASK)
2834 return true;
2835
2836 /*
2837 * Finally, consult the VMREAD/VMWRITE bitmap whether to intercept the instruction or not.
2838 */
2839 uint32_t const u32VmcsField = RT_LO_U32(u64VmcsField);
2840 uint8_t const * const pbBitmap = uExitReason == VMX_EXIT_VMREAD
2841 ? &pVCpu->cpum.s.Guest.hwvirt.vmx.abVmreadBitmap[0]
2842 : &pVCpu->cpum.s.Guest.hwvirt.vmx.abVmwriteBitmap[0];
2843 Assert(pbBitmap);
2844 Assert(u32VmcsField >> 3 < VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
2845 return ASMBitTest(pbBitmap, (u32VmcsField << 3) + (u32VmcsField & 7));
2846}
2847
2848
2849
2850/**
2851 * Determines whether the given I/O access should cause a nested-guest \#VMEXIT.
2852 *
2853 * @param pvIoBitmap Pointer to the nested-guest IO bitmap.
2854 * @param u16Port The IO port being accessed.
2855 * @param enmIoType The type of IO access.
2856 * @param cbReg The IO operand size in bytes.
2857 * @param cAddrSizeBits The address size bits (for 16, 32 or 64).
2858 * @param iEffSeg The effective segment number.
2859 * @param fRep Whether this is a repeating IO instruction (REP prefix).
2860 * @param fStrIo Whether this is a string IO instruction.
2861 * @param pIoExitInfo Pointer to the SVMIOIOEXITINFO struct to be filled.
2862 * Optional, can be NULL.
2863 */
2864VMM_INT_DECL(bool) CPUMIsSvmIoInterceptSet(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
2865 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
2866 PSVMIOIOEXITINFO pIoExitInfo)
2867{
2868 Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
2869 Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
2870
2871 /*
2872 * The IOPM layout:
2873 * Each bit represents one 8-bit port. That covers ports 0..65535, i.e. 65536
2874 * bits or two 4K pages.
2875 *
2876 * For IO instructions that access more than a single byte, the permission bits
2877 * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.
2878 *
2879 * Since it's possible to do a 32-bit IO access at port 65534 (accessing 4 bytes),
2880 * we need 3 extra bits beyond the second 4K page.
2881 */
2882 static const uint16_t s_auSizeMasks[] = { 0, 1, 3, 0, 0xf, 0, 0, 0 };
2883
2884 uint16_t const offIopm = u16Port >> 3;
2885 uint16_t const fSizeMask = s_auSizeMasks[(cAddrSizeBits >> SVM_IOIO_OP_SIZE_SHIFT) & 7];
2886 uint8_t const cShift = u16Port - (offIopm << 3);
2887 uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift);
2888
2889 uint8_t const *pbIopm = (uint8_t *)pvIoBitmap;
2890 Assert(pbIopm);
2891 pbIopm += offIopm;
2892 uint16_t const u16Iopm = *(uint16_t *)pbIopm;
2893 if (u16Iopm & fIopmMask)
2894 {
2895 if (pIoExitInfo)
2896 {
2897 static const uint32_t s_auIoOpSize[] =
2898 { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };
2899
2900 static const uint32_t s_auIoAddrSize[] =
2901 { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };
2902
2903 pIoExitInfo->u = s_auIoOpSize[cbReg & 7];
2904 pIoExitInfo->u |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
2905 pIoExitInfo->n.u1Str = fStrIo;
2906 pIoExitInfo->n.u1Rep = fRep;
2907 pIoExitInfo->n.u3Seg = iEffSeg & 7;
2908 pIoExitInfo->n.u1Type = enmIoType;
2909 pIoExitInfo->n.u16Port = u16Port;
2910 }
2911 return true;
2912 }
2913
2914 /** @todo remove later (for debugging as VirtualBox always traps all IO
2915 * intercepts). */
2916 AssertMsgFailed(("CPUMSvmIsIOInterceptActive: We expect an IO intercept here!\n"));
2917 return false;
2918}
2919
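/*
 * A standalone illustrative sketch (not part of this file) of the IOPM lookup in
 * CPUMIsSvmIoInterceptSet above: each port owns one bit, an access of cbAccess
 * bytes covers cbAccess consecutive bits, and reading the two bytes starting at
 * (port >> 3) is enough to hold any such run.  ExampleSvmIoIntercepted is a
 * hypothetical helper for accesses of 1, 2 or 4 bytes.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool ExampleSvmIoIntercepted(uint8_t const *pbIopm, uint16_t uPort, uint8_t cbAccess)
{
    uint16_t const offIopm   = uPort >> 3;                            /* Byte holding the first bit. */
    uint8_t  const cShift    = uPort & 7;                             /* Bit position within that byte. */
    uint16_t const fSizeMask = (uint16_t)((1u << cbAccess) - 1u);     /* 1, 2 or 4 consecutive bits. */
    uint16_t const fIopmMask = (uint16_t)(fSizeMask << cShift);
    uint16_t const u16Iopm   = (uint16_t)(pbIopm[offIopm] | ((uint16_t)pbIopm[offIopm + 1] << 8));
    return (u16Iopm & fIopmMask) != 0;
}

int main(void)
{
    static uint8_t abIopm[0x2000 + 2];                                /* Two 4K pages plus slack, zero initialized. */
    abIopm[0x60 >> 3] |= (uint8_t)(1u << (0x60 & 7));                 /* Intercept port 0x60 only. */
    printf("1 byte  at 0x60: %d\n", ExampleSvmIoIntercepted(abIopm, 0x60, 1)); /* 1 */
    printf("2 bytes at 0x5f: %d\n", ExampleSvmIoIntercepted(abIopm, 0x5f, 2)); /* 1: covers ports 0x5f and 0x60. */
    printf("1 byte  at 0x61: %d\n", ExampleSvmIoIntercepted(abIopm, 0x61, 1)); /* 0 */
    return 0;
}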
2920
2921/**
2922 * Gets the MSR permission bitmap byte and bit offset for the specified MSR.
2923 *
2924 * @returns VBox status code.
2925 * @param idMsr The MSR being requested.
2926 * @param pbOffMsrpm Where to store the byte offset in the MSR permission
2927 * bitmap for @a idMsr.
2928 * @param puMsrpmBit Where to store the bit offset starting at the byte
2929 * returned in @a pbOffMsrpm.
2930 */
2931VMM_INT_DECL(int) CPUMGetSvmMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit)
2932{
2933 Assert(pbOffMsrpm);
2934 Assert(puMsrpmBit);
2935
2936 /*
2937 * MSRPM Layout:
2938 * Byte offset MSR range
2939 * 0x000 - 0x7ff 0x00000000 - 0x00001fff
2940 * 0x800 - 0xfff 0xc0000000 - 0xc0001fff
2941 * 0x1000 - 0x17ff 0xc0010000 - 0xc0011fff
2942 * 0x1800 - 0x1fff Reserved
2943 *
2944 * Each MSR is represented by 2 permission bits (read and write).
2945 */
2946 if (idMsr <= 0x00001fff)
2947 {
2948 /* Pentium-compatible MSRs. */
2949 uint32_t const bitoffMsr = idMsr << 1;
2950 *pbOffMsrpm = bitoffMsr >> 3;
2951 *puMsrpmBit = bitoffMsr & 7;
2952 return VINF_SUCCESS;
2953 }
2954
2955 if ( idMsr >= 0xc0000000
2956 && idMsr <= 0xc0001fff)
2957 {
2958 /* AMD Sixth Generation x86 Processor MSRs. */
2959 uint32_t const bitoffMsr = (idMsr - 0xc0000000) << 1;
2960 *pbOffMsrpm = 0x800 + (bitoffMsr >> 3);
2961 *puMsrpmBit = bitoffMsr & 7;
2962 return VINF_SUCCESS;
2963 }
2964
2965 if ( idMsr >= 0xc0010000
2966 && idMsr <= 0xc0011fff)
2967 {
2968 /* AMD Seventh and Eighth Generation Processor MSRs. */
2969 uint32_t const bitoffMsr = (idMsr - 0xc0010000) << 1;
2970 *pbOffMsrpm = 0x1000 + (bitoffMsr >> 3);
2971 *puMsrpmBit = bitoffMsr & 7;
2972 return VINF_SUCCESS;
2973 }
2974
2975 *pbOffMsrpm = 0;
2976 *puMsrpmBit = 0;
2977 return VERR_OUT_OF_RANGE;
2978}
2979
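/*
 * A standalone illustrative sketch (not part of this file) of the MSRPM mapping
 * described above: two permission bits per MSR (read, then write), grouped into
 * three 0x800-byte blocks for the three architected MSR ranges.  ExampleSvmMsrpmPos
 * is a hypothetical helper returning 0 on success and -1 for reserved/out-of-range
 * MSRs.
 */
#include <stdint.h>
#include <stdio.h>

static int ExampleSvmMsrpmPos(uint32_t idMsr, uint16_t *poffByte, uint8_t *piBit)
{
    uint32_t uBase;
    uint16_t offBlock;
    if (idMsr <= UINT32_C(0x00001fff))
    {
        uBase    = 0;                       /* Pentium-compatible MSRs. */
        offBlock = 0x000;
    }
    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
    {
        uBase    = UINT32_C(0xc0000000);    /* AMD sixth generation MSRs. */
        offBlock = 0x800;
    }
    else if (idMsr - UINT32_C(0xc0010000) <= UINT32_C(0x00001fff))
    {
        uBase    = UINT32_C(0xc0010000);    /* AMD seventh/eighth generation MSRs. */
        offBlock = 0x1000;
    }
    else
        return -1;                          /* Reserved area / out of range. */

    uint32_t const bitoffMsr = (idMsr - uBase) << 1;      /* Two bits per MSR. */
    *poffByte = (uint16_t)(offBlock + (bitoffMsr >> 3));
    *piBit    = (uint8_t)(bitoffMsr & 7);                 /* Read bit; the write bit follows it. */
    return 0;
}

int main(void)
{
    uint16_t offByte;
    uint8_t  iBit;
    if (ExampleSvmMsrpmPos(UINT32_C(0xc0000080) /* EFER */, &offByte, &iBit) == 0)
        printf("EFER: byte %#x, read bit %u, write bit %u\n", (unsigned)offByte, (unsigned)iBit, (unsigned)(iBit + 1));
    return 0;
}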
2980
2981/**
2982 * Checks whether the guest is in VMX non-root mode and using EPT paging.
2983 *
2984 * @returns @c true if in VMX non-root operation with EPT, @c false otherwise.
2985 * @param pVCpu The cross context virtual CPU structure.
2986 */
2987VMM_INT_DECL(bool) CPUMIsGuestVmxEptPagingEnabled(PCVMCPUCC pVCpu)
2988{
2989 return CPUMIsGuestVmxEptPagingEnabledEx(&pVCpu->cpum.s.Guest);
2990}
2991
2992
2993/**
2994 * Checks whether the guest is in VMX non-root mode and using EPT paging and the
2995 * nested-guest is in PAE mode.
2996 *
2997 * @returns @c true if in VMX non-root operation with EPT and PAE paging, @c false otherwise.
2998 * @param pVCpu The cross context virtual CPU structure.
2999 */
3000VMM_INT_DECL(bool) CPUMIsGuestVmxEptPaePagingEnabled(PCVMCPUCC pVCpu)
3001{
3002 return CPUMIsGuestVmxEptPagingEnabledEx(&pVCpu->cpum.s.Guest)
3003 && CPUMIsGuestInPAEModeEx(&pVCpu->cpum.s.Guest);
3004}
3005
3006
3007/**
3008 * Returns the guest-physical address of the APIC-access page when executing a
3009 * nested-guest.
3010 *
3011 * @returns The APIC-access page guest-physical address.
3012 * @param pVCpu The cross context virtual CPU structure.
3013 */
3014VMM_INT_DECL(uint64_t) CPUMGetGuestVmxApicAccessPageAddr(PCVMCPUCC pVCpu)
3015{
3016 return CPUMGetGuestVmxApicAccessPageAddrEx(&pVCpu->cpum.s.Guest);
3017}
3018