VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@97231

Last change on this file since 97231 was 97231, checked in by vboxsync, 2 years ago

VMM/CPUM: Define our own X86EFLAGS/X86RFLAGS structures so we can use reserved bits for internal state.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 99.1 KB
 
1/* $Id: CPUMAllRegs.cpp 97231 2022-10-19 09:12:57Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_CPUM
33#include <VBox/vmm/cpum.h>
34#include <VBox/vmm/dbgf.h>
35#include <VBox/vmm/apic.h>
36#include <VBox/vmm/pgm.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/em.h>
39#include <VBox/vmm/nem.h>
40#include <VBox/vmm/hm.h>
41#include "CPUMInternal.h"
42#include <VBox/vmm/vmcc.h>
43#include <VBox/err.h>
44#include <VBox/dis.h>
45#include <VBox/log.h>
46#include <VBox/vmm/hm.h>
47#include <VBox/vmm/tm.h>
48#include <iprt/assert.h>
49#include <iprt/asm.h>
50#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
51# include <iprt/asm-amd64-x86.h>
52#endif
53#ifdef IN_RING3
54# include <iprt/thread.h>
55#endif
56
57/** Disable stack frame pointer generation here. */
58#if defined(_MSC_VER) && !defined(DEBUG) && defined(RT_ARCH_X86)
59# pragma optimize("y", off)
60#endif
61
62AssertCompile2MemberOffsets(VM, cpum.s.GuestFeatures, cpum.ro.GuestFeatures);
63
64
65/*********************************************************************************************************************************
66* Defined Constants And Macros *
67*********************************************************************************************************************************/
68/**
69 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
70 *
71 * @returns Pointer to the Virtual CPU.
72 * @param a_pGuestCtx Pointer to the guest context.
73 */
74#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
75
76/**
77 * Lazily loads the hidden parts of a selector register when using raw-mode.
78 */
79#define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
80 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg))
81
82/** @def CPUM_INT_ASSERT_NOT_EXTRN
83 * Macro for asserting that the state parts in @a a_fNotExtrn are present (i.e. not marked as external).
84 *
85 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
86 * @param a_fNotExtrn Mask of CPUMCTX_EXTRN_XXX bits to check.
87 */
88#define CPUM_INT_ASSERT_NOT_EXTRN(a_pVCpu, a_fNotExtrn) \
89 AssertMsg(!((a_pVCpu)->cpum.s.Guest.fExtrn & (a_fNotExtrn)), \
90 ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.s.Guest.fExtrn, (a_fNotExtrn)))
91
92
93VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
94{
95 pVCpu->cpum.s.Hyper.cr3 = cr3;
96}
97
98VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
99{
100 return pVCpu->cpum.s.Hyper.cr3;
101}
102
103
104/** @def MAYBE_LOAD_DRx
105 * Macro for updating DRx values in raw-mode and ring-0 contexts.
106 */
107#ifdef IN_RING0
108# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { a_fnLoad(a_uValue); } while (0)
109#else
110# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
111#endif
112
113VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
114{
115 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
116 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
117}
118
119
120VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
121{
122 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
123 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
124}
125
126
127VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
128{
129 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
130 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
131}
132
133
134VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
135{
136 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
137 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
138}
139
140
141VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
142{
143 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
144}
145
146
147VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
148{
149 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
150}
151
152
153VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
154{
155 return pVCpu->cpum.s.Hyper.dr[0];
156}
157
158
159VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
160{
161 return pVCpu->cpum.s.Hyper.dr[1];
162}
163
164
165VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
166{
167 return pVCpu->cpum.s.Hyper.dr[2];
168}
169
170
171VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
172{
173 return pVCpu->cpum.s.Hyper.dr[3];
174}
175
176
177VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
178{
179 return pVCpu->cpum.s.Hyper.dr[6];
180}
181
182
183VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
184{
185 return pVCpu->cpum.s.Hyper.dr[7];
186}
187
188
189/**
190 * Checks that the special cookie stored in the unused reserved RFLAGS bits is intact.
191 *
192 * @retval true if cookie is ok.
193 * @retval false if cookie is not ok.
194 * @param pVM The cross context VM structure.
195 * @param pVCpu The cross context virtual CPU structure.
196 */
197VMM_INT_DECL(bool) CPUMAssertGuestRFlagsCookie(PVM pVM, PVMCPU pVCpu)
198{
199 AssertLogRelMsgReturn( (pVCpu->cpum.s.Guest.rflags.uBoth & ~(uint64_t)(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK))
200 == pVM->cpum.s.fReservedRFlagsCookie
201 && (pVCpu->cpum.s.Guest.rflags.uBoth & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK,
202 ("rflags=%#RX64 vs fReservedRFlagsCookie=%#RX64\n",
203 pVCpu->cpum.s.Guest.rflags.uBoth, pVM->cpum.s.fReservedRFlagsCookie),
204 false);
205 return true;
206}
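
/*
 * A minimal sketch of the invariant asserted above, assuming the cookie only
 * occupies bits outside X86_EFL_LIVE_MASK and X86_EFL_RA1_MASK (which is what
 * the comparison implies).  The helper name is illustrative, not an existing API.
 */
DECLINLINE(uint64_t) cpumExampleComposeRFlagsWithCookie(PVM pVM, uint32_t fArchFlags)
{
    /* Keep the architecturally live flags, force the must-be-one bit, and
       stamp the per-VM cookie into the remaining (reserved) bits. */
    return (fArchFlags & X86_EFL_LIVE_MASK)
         | X86_EFL_RA1_MASK
         | pVM->cpum.s.fReservedRFlagsCookie;
}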
207
208
209/**
210 * Queries the pointer to the internal CPUMCTX structure.
211 *
212 * @returns The CPUMCTX pointer.
213 * @param pVCpu The cross context virtual CPU structure.
214 */
215VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
216{
217 return &pVCpu->cpum.s.Guest;
218}
219
220
221/**
222 * Queries the pointer to the internal CPUMCTXMSRS structure.
223 *
224 * This is for NEM only.
225 *
226 * @returns The CPUMCTX pointer.
227 * @param pVCpu The cross context virtual CPU structure.
228 */
229VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu)
230{
231 return &pVCpu->cpum.s.GuestMsrs;
232}
233
234
235VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
236{
237 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
238 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
239 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_GDTR;
240 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
241 return VINF_SUCCESS; /* formality, consider it void. */
242}
243
244
245VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
246{
247 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
248 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
249 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_IDTR;
250 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
251 return VINF_SUCCESS; /* formality, consider it void. */
252}
253
254
255VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
256{
257 pVCpu->cpum.s.Guest.tr.Sel = tr;
258 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
259 return VINF_SUCCESS; /* formality, consider it void. */
260}
261
262
263VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
264{
265 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
266 /* The caller will set more hidden bits if it has them. */
267 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
268 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
269 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
270 return VINF_SUCCESS; /* formality, consider it void. */
271}
272
273
274/**
275 * Set the guest CR0.
276 *
277 * When called in GC, the hyper CR0 may be updated if that is
278 * required. The caller only has to take special action if AM,
279 * WP, PG or PE changes.
280 *
281 * @returns VINF_SUCCESS (consider it void).
282 * @param pVCpu The cross context virtual CPU structure.
283 * @param cr0 The new CR0 value.
284 */
285VMMDECL(int) CPUMSetGuestCR0(PVMCPUCC pVCpu, uint64_t cr0)
286{
287 /*
288 * Check for changes causing TLB flushes (for REM).
289 * The caller is responsible for calling PGM when appropriate.
290 */
291 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
292 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
293 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
294 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
295
296 /*
297 * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
298 */
299 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
300 PGMCr0WpEnabled(pVCpu);
301
302 /* The ET flag is settable on a 386 and hardwired on 486+. */
303 if ( !(cr0 & X86_CR0_ET)
304 && pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386)
305 cr0 |= X86_CR0_ET;
306
307 pVCpu->cpum.s.Guest.cr0 = cr0;
308 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR0;
309 return VINF_SUCCESS;
310}
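
/*
 * A hedged sketch of the caller-side contract described above: the caller,
 * not CPUM, reacts when the mode-affecting CR0 bits change.  The helper name
 * is illustrative and the PGM notification is left abstract, since the exact
 * entry point depends on the caller's context.
 */
DECLINLINE(void) cpumExampleUpdateCr0(PVMCPUCC pVCpu, uint64_t uNewCr0)
{
    uint64_t const fModeBits = X86_CR0_PE | X86_CR0_PG | X86_CR0_WP | X86_CR0_AM;
    uint64_t const uOldCr0   = CPUMGetGuestCR0(pVCpu);

    CPUMSetGuestCR0(pVCpu, uNewCr0);
    if ((uOldCr0 ^ uNewCr0) & fModeBits)
        Log(("CR0 mode bits changed %#RX64 -> %#RX64: caller must let PGM re-evaluate the paging mode\n",
             uOldCr0, uNewCr0));
}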
311
312
313VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
314{
315 pVCpu->cpum.s.Guest.cr2 = cr2;
316 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR2;
317 return VINF_SUCCESS;
318}
319
320
321VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
322{
323 pVCpu->cpum.s.Guest.cr3 = cr3;
324 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
325 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR3;
326 return VINF_SUCCESS;
327}
328
329
330VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
331{
332 /* Note! We don't bother with OSXSAVE and legacy CPUID patches. */
333
334 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
335 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
336 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
337
338 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
339 pVCpu->cpum.s.Guest.cr4 = cr4;
340 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR4;
341 return VINF_SUCCESS;
342}
343
344
345VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
346{
347 pVCpu->cpum.s.Guest.eflags.u = eflags;
348 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
349 return VINF_SUCCESS;
350}
351
352
353VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
354{
355 pVCpu->cpum.s.Guest.eip = eip;
356 return VINF_SUCCESS;
357}
358
359
360VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
361{
362 pVCpu->cpum.s.Guest.eax = eax;
363 return VINF_SUCCESS;
364}
365
366
367VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
368{
369 pVCpu->cpum.s.Guest.ebx = ebx;
370 return VINF_SUCCESS;
371}
372
373
374VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
375{
376 pVCpu->cpum.s.Guest.ecx = ecx;
377 return VINF_SUCCESS;
378}
379
380
381VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
382{
383 pVCpu->cpum.s.Guest.edx = edx;
384 return VINF_SUCCESS;
385}
386
387
388VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
389{
390 pVCpu->cpum.s.Guest.esp = esp;
391 return VINF_SUCCESS;
392}
393
394
395VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
396{
397 pVCpu->cpum.s.Guest.ebp = ebp;
398 return VINF_SUCCESS;
399}
400
401
402VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
403{
404 pVCpu->cpum.s.Guest.esi = esi;
405 return VINF_SUCCESS;
406}
407
408
409VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
410{
411 pVCpu->cpum.s.Guest.edi = edi;
412 return VINF_SUCCESS;
413}
414
415
416VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
417{
418 pVCpu->cpum.s.Guest.ss.Sel = ss;
419 return VINF_SUCCESS;
420}
421
422
423VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
424{
425 pVCpu->cpum.s.Guest.cs.Sel = cs;
426 return VINF_SUCCESS;
427}
428
429
430VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
431{
432 pVCpu->cpum.s.Guest.ds.Sel = ds;
433 return VINF_SUCCESS;
434}
435
436
437VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
438{
439 pVCpu->cpum.s.Guest.es.Sel = es;
440 return VINF_SUCCESS;
441}
442
443
444VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
445{
446 pVCpu->cpum.s.Guest.fs.Sel = fs;
447 return VINF_SUCCESS;
448}
449
450
451VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
452{
453 pVCpu->cpum.s.Guest.gs.Sel = gs;
454 return VINF_SUCCESS;
455}
456
457
458VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
459{
460 pVCpu->cpum.s.Guest.msrEFER = val;
461 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_EFER;
462}
463
464
465VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PCVMCPU pVCpu, uint16_t *pcbLimit)
466{
467 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_IDTR);
468 if (pcbLimit)
469 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
470 return pVCpu->cpum.s.Guest.idtr.pIdt;
471}
472
473
474VMMDECL(RTSEL) CPUMGetGuestTR(PCVMCPU pVCpu, PCPUMSELREGHID pHidden)
475{
476 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_TR);
477 if (pHidden)
478 *pHidden = pVCpu->cpum.s.Guest.tr;
479 return pVCpu->cpum.s.Guest.tr.Sel;
480}
481
482
483VMMDECL(RTSEL) CPUMGetGuestCS(PCVMCPU pVCpu)
484{
485 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CS);
486 return pVCpu->cpum.s.Guest.cs.Sel;
487}
488
489
490VMMDECL(RTSEL) CPUMGetGuestDS(PCVMCPU pVCpu)
491{
492 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DS);
493 return pVCpu->cpum.s.Guest.ds.Sel;
494}
495
496
497VMMDECL(RTSEL) CPUMGetGuestES(PCVMCPU pVCpu)
498{
499 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_ES);
500 return pVCpu->cpum.s.Guest.es.Sel;
501}
502
503
504VMMDECL(RTSEL) CPUMGetGuestFS(PCVMCPU pVCpu)
505{
506 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_FS);
507 return pVCpu->cpum.s.Guest.fs.Sel;
508}
509
510
511VMMDECL(RTSEL) CPUMGetGuestGS(PCVMCPU pVCpu)
512{
513 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_GS);
514 return pVCpu->cpum.s.Guest.gs.Sel;
515}
516
517
518VMMDECL(RTSEL) CPUMGetGuestSS(PCVMCPU pVCpu)
519{
520 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SS);
521 return pVCpu->cpum.s.Guest.ss.Sel;
522}
523
524
525VMMDECL(uint64_t) CPUMGetGuestFlatPC(PVMCPU pVCpu)
526{
527 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
528 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
529 if ( !CPUMIsGuestInLongMode(pVCpu)
530 || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
531 return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.cs.u64Base;
532 return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.cs.u64Base;
533}
534
535
536VMMDECL(uint64_t) CPUMGetGuestFlatSP(PVMCPU pVCpu)
537{
538 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
539 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
540 if ( !CPUMIsGuestInLongMode(pVCpu)
541 || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
542 return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.ss.u64Base;
543 return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.ss.u64Base;
544}
545
546
547VMMDECL(RTSEL) CPUMGetGuestLDTR(PCVMCPU pVCpu)
548{
549 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_LDTR);
550 return pVCpu->cpum.s.Guest.ldtr.Sel;
551}
552
553
554VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PCVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
555{
556 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_LDTR);
557 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
558 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
559 return pVCpu->cpum.s.Guest.ldtr.Sel;
560}
561
562
563VMMDECL(uint64_t) CPUMGetGuestCR0(PCVMCPU pVCpu)
564{
565 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
566 return pVCpu->cpum.s.Guest.cr0;
567}
568
569
570VMMDECL(uint64_t) CPUMGetGuestCR2(PCVMCPU pVCpu)
571{
572 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR2);
573 return pVCpu->cpum.s.Guest.cr2;
574}
575
576
577VMMDECL(uint64_t) CPUMGetGuestCR3(PCVMCPU pVCpu)
578{
579 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
580 return pVCpu->cpum.s.Guest.cr3;
581}
582
583
584VMMDECL(uint64_t) CPUMGetGuestCR4(PCVMCPU pVCpu)
585{
586 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
587 return pVCpu->cpum.s.Guest.cr4;
588}
589
590
591VMMDECL(uint64_t) CPUMGetGuestCR8(PCVMCPUCC pVCpu)
592{
593 uint64_t u64;
594 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
595 if (RT_FAILURE(rc))
596 u64 = 0;
597 return u64;
598}
599
600
601VMMDECL(void) CPUMGetGuestGDTR(PCVMCPU pVCpu, PVBOXGDTR pGDTR)
602{
603 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_GDTR);
604 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
605}
606
607
608VMMDECL(uint32_t) CPUMGetGuestEIP(PCVMCPU pVCpu)
609{
610 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
611 return pVCpu->cpum.s.Guest.eip;
612}
613
614
615VMMDECL(uint64_t) CPUMGetGuestRIP(PCVMCPU pVCpu)
616{
617 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
618 return pVCpu->cpum.s.Guest.rip;
619}
620
621
622VMMDECL(uint32_t) CPUMGetGuestEAX(PCVMCPU pVCpu)
623{
624 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RAX);
625 return pVCpu->cpum.s.Guest.eax;
626}
627
628
629VMMDECL(uint32_t) CPUMGetGuestEBX(PCVMCPU pVCpu)
630{
631 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RBX);
632 return pVCpu->cpum.s.Guest.ebx;
633}
634
635
636VMMDECL(uint32_t) CPUMGetGuestECX(PCVMCPU pVCpu)
637{
638 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RCX);
639 return pVCpu->cpum.s.Guest.ecx;
640}
641
642
643VMMDECL(uint32_t) CPUMGetGuestEDX(PCVMCPU pVCpu)
644{
645 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RDX);
646 return pVCpu->cpum.s.Guest.edx;
647}
648
649
650VMMDECL(uint32_t) CPUMGetGuestESI(PCVMCPU pVCpu)
651{
652 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSI);
653 return pVCpu->cpum.s.Guest.esi;
654}
655
656
657VMMDECL(uint32_t) CPUMGetGuestEDI(PCVMCPU pVCpu)
658{
659 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RDI);
660 return pVCpu->cpum.s.Guest.edi;
661}
662
663
664VMMDECL(uint32_t) CPUMGetGuestESP(PCVMCPU pVCpu)
665{
666 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSP);
667 return pVCpu->cpum.s.Guest.esp;
668}
669
670
671VMMDECL(uint32_t) CPUMGetGuestEBP(PCVMCPU pVCpu)
672{
673 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RBP);
674 return pVCpu->cpum.s.Guest.ebp;
675}
676
677
678VMMDECL(uint32_t) CPUMGetGuestEFlags(PCVMCPU pVCpu)
679{
680 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RFLAGS);
681 return pVCpu->cpum.s.Guest.eflags.u;
682}
683
684
685VMMDECL(int) CPUMGetGuestCRx(PCVMCPUCC pVCpu, unsigned iReg, uint64_t *pValue)
686{
687 switch (iReg)
688 {
689 case DISCREG_CR0:
690 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
691 *pValue = pVCpu->cpum.s.Guest.cr0;
692 break;
693
694 case DISCREG_CR2:
695 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR2);
696 *pValue = pVCpu->cpum.s.Guest.cr2;
697 break;
698
699 case DISCREG_CR3:
700 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
701 *pValue = pVCpu->cpum.s.Guest.cr3;
702 break;
703
704 case DISCREG_CR4:
705 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
706 *pValue = pVCpu->cpum.s.Guest.cr4;
707 break;
708
709 case DISCREG_CR8:
710 {
711 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
712 uint8_t u8Tpr;
713 int rc = APICGetTpr(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
714 if (RT_FAILURE(rc))
715 {
716 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
717 *pValue = 0;
718 return rc;
719 }
720 *pValue = u8Tpr >> 4; /* Bits 7-4 of the TPR are the task priority that goes into CR8; bits 3-0 are ignored. */
721 break;
722 }
723
724 default:
725 return VERR_INVALID_PARAMETER;
726 }
727 return VINF_SUCCESS;
728}
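
/*
 * A short illustration of the CR8/TPR relationship used by the DISCREG_CR8
 * case above: CR8 carries the 4-bit task-priority class, i.e. bits 7-4 of the
 * APIC TPR, and a CR8 write clears the low TPR nibble.  The helper names are
 * illustrative only.
 */
DECLINLINE(uint8_t) cpumExampleCr8FromTpr(uint8_t u8Tpr)
{
    return u8Tpr >> 4;                  /* e.g. TPR 0x80 -> CR8 0x8 */
}

DECLINLINE(uint8_t) cpumExampleTprFromCr8(uint8_t u8Cr8)
{
    return (uint8_t)(u8Cr8 << 4);       /* e.g. CR8 0x8 -> TPR 0x80 */
}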
729
730
731VMMDECL(uint64_t) CPUMGetGuestDR0(PCVMCPU pVCpu)
732{
733 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
734 return pVCpu->cpum.s.Guest.dr[0];
735}
736
737
738VMMDECL(uint64_t) CPUMGetGuestDR1(PCVMCPU pVCpu)
739{
740 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
741 return pVCpu->cpum.s.Guest.dr[1];
742}
743
744
745VMMDECL(uint64_t) CPUMGetGuestDR2(PCVMCPU pVCpu)
746{
747 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
748 return pVCpu->cpum.s.Guest.dr[2];
749}
750
751
752VMMDECL(uint64_t) CPUMGetGuestDR3(PCVMCPU pVCpu)
753{
754 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
755 return pVCpu->cpum.s.Guest.dr[3];
756}
757
758
759VMMDECL(uint64_t) CPUMGetGuestDR6(PCVMCPU pVCpu)
760{
761 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR6);
762 return pVCpu->cpum.s.Guest.dr[6];
763}
764
765
766VMMDECL(uint64_t) CPUMGetGuestDR7(PCVMCPU pVCpu)
767{
768 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR7);
769 return pVCpu->cpum.s.Guest.dr[7];
770}
771
772
773VMMDECL(int) CPUMGetGuestDRx(PCVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
774{
775 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR_MASK);
776 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
777 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
778 if (iReg == 4 || iReg == 5)
779 iReg += 2;
780 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
781 return VINF_SUCCESS;
782}
783
784
785VMMDECL(uint64_t) CPUMGetGuestEFER(PCVMCPU pVCpu)
786{
787 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
788 return pVCpu->cpum.s.Guest.msrEFER;
789}
790
791
792/**
793 * Looks up a CPUID leaf in the CPUID leaf array, no subleaf.
794 *
795 * @returns Pointer to the leaf if found, NULL if not.
796 *
797 * @param pVM The cross context VM structure.
798 * @param uLeaf The leaf to get.
799 */
800PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf)
801{
802 unsigned iEnd = RT_MIN(pVM->cpum.s.GuestInfo.cCpuIdLeaves, RT_ELEMENTS(pVM->cpum.s.GuestInfo.aCpuIdLeaves));
803 if (iEnd)
804 {
805 unsigned iStart = 0;
806 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.aCpuIdLeaves;
807 for (;;)
808 {
809 unsigned i = iStart + (iEnd - iStart) / 2U;
810 if (uLeaf < paLeaves[i].uLeaf)
811 {
812 if (i <= iStart)
813 return NULL;
814 iEnd = i;
815 }
816 else if (uLeaf > paLeaves[i].uLeaf)
817 {
818 i += 1;
819 if (i >= iEnd)
820 return NULL;
821 iStart = i;
822 }
823 else
824 {
825 if (RT_LIKELY(paLeaves[i].fSubLeafMask == 0 && paLeaves[i].uSubLeaf == 0))
826 return &paLeaves[i];
827
828 /* This shouldn't normally happen. But in case it does due
829 to user configuration overrides or something, just return the
830 first sub-leaf. */
831 AssertMsgFailed(("uLeaf=%#x fSubLeafMask=%#x uSubLeaf=%#x\n",
832 uLeaf, paLeaves[i].fSubLeafMask, paLeaves[i].uSubLeaf));
833 while ( paLeaves[i].uSubLeaf != 0
834 && i > 0
835 && uLeaf == paLeaves[i - 1].uLeaf)
836 i--;
837 return &paLeaves[i];
838 }
839 }
840 }
841
842 return NULL;
843}
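
/*
 * A minimal sketch of how the lookup above might be consumed: fetch the
 * standard feature leaf (leaf 1 carries no sub-leaves) and pull one register
 * out of it.  The helper name and the zero fallback for a missing leaf are
 * illustrative assumptions.
 */
DECLINLINE(uint32_t) cpumExampleGetStdLeaf1Edx(PVM pVM)
{
    PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, 1);
    return pLeaf ? pLeaf->uEdx : 0; /* No leaf 1 configured: report no features. */
}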
844
845
846/**
847 * Looks up a CPUID leaf in the CPUID leaf array.
848 *
849 * @returns Pointer to the leaf if found, NULL if not.
850 *
851 * @param pVM The cross context VM structure.
852 * @param uLeaf The leaf to get.
853 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
854 * isn't.
855 * @param pfExactSubLeafHit Whether we've got an exact subleaf hit or not.
856 */
857PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit)
858{
859 unsigned iEnd = RT_MIN(pVM->cpum.s.GuestInfo.cCpuIdLeaves, RT_ELEMENTS(pVM->cpum.s.GuestInfo.aCpuIdLeaves));
860 if (iEnd)
861 {
862 unsigned iStart = 0;
863 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.aCpuIdLeaves;
864 for (;;)
865 {
866 unsigned i = iStart + (iEnd - iStart) / 2U;
867 if (uLeaf < paLeaves[i].uLeaf)
868 {
869 if (i <= iStart)
870 return NULL;
871 iEnd = i;
872 }
873 else if (uLeaf > paLeaves[i].uLeaf)
874 {
875 i += 1;
876 if (i >= iEnd)
877 return NULL;
878 iStart = i;
879 }
880 else
881 {
882 uSubLeaf &= paLeaves[i].fSubLeafMask;
883 if (uSubLeaf == paLeaves[i].uSubLeaf)
884 *pfExactSubLeafHit = true;
885 else
886 {
887 /* Find the right subleaf. We return the last one before
888 uSubLeaf if we don't find an exact match. */
889 if (uSubLeaf < paLeaves[i].uSubLeaf)
890 while ( i > 0
891 && uLeaf == paLeaves[i - 1].uLeaf
892 && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
893 i--;
894 else
895 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
896 && uLeaf == paLeaves[i + 1].uLeaf
897 && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
898 i++;
899 *pfExactSubLeafHit = uSubLeaf == paLeaves[i].uSubLeaf;
900 }
901 return &paLeaves[i];
902 }
903 }
904 }
905
906 *pfExactSubLeafHit = false;
907 return NULL;
908}
909
910
911/**
912 * Gets a CPUID leaf.
913 *
914 * @param pVCpu The cross context virtual CPU structure.
915 * @param uLeaf The CPUID leaf to get.
916 * @param uSubLeaf The CPUID sub-leaf to get, if applicable.
917 * @param f64BitMode A tristate indicating if the caller is in 64-bit mode or
918 * not: 1=true, 0=false, -1=whatever. This affects how the
919 * X86_CPUID_EXT_FEATURE_EDX_SYSCALL flag is returned on
920 * Intel CPUs, where it's only returned in 64-bit mode.
921 * @param pEax Where to store the EAX value.
922 * @param pEbx Where to store the EBX value.
923 * @param pEcx Where to store the ECX value.
924 * @param pEdx Where to store the EDX value.
925 */
926VMMDECL(void) CPUMGetGuestCpuId(PVMCPUCC pVCpu, uint32_t uLeaf, uint32_t uSubLeaf, int f64BitMode,
927 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
928{
929 bool fExactSubLeafHit;
930 PVM pVM = pVCpu->CTX_SUFF(pVM);
931 PCCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, uLeaf, uSubLeaf, &fExactSubLeafHit);
932 if (pLeaf)
933 {
934 AssertMsg(pLeaf->uLeaf == uLeaf, ("%#x %#x\n", pLeaf->uLeaf, uLeaf));
935 if (fExactSubLeafHit)
936 {
937 *pEax = pLeaf->uEax;
938 *pEbx = pLeaf->uEbx;
939 *pEcx = pLeaf->uEcx;
940 *pEdx = pLeaf->uEdx;
941
942 /*
943 * Deal with CPU specific information.
944 */
945 if (pLeaf->fFlags & ( CPUMCPUIDLEAF_F_CONTAINS_APIC_ID
946 | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE
947 | CPUMCPUIDLEAF_F_CONTAINS_APIC ))
948 {
949 if (uLeaf == 1)
950 {
951 /* EBX: Bits 31-24: Initial APIC ID. */
952 Assert(pVCpu->idCpu <= 255);
953 AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
954 *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);
955
956 /* EDX: Bit 9: AND with APICBASE.EN. */
957 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
958 *pEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
959
960 /* ECX: Bit 27: CR4.OSXSAVE mirror. */
961 *pEcx = (pLeaf->uEcx & ~X86_CPUID_FEATURE_ECX_OSXSAVE)
962 | (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE ? X86_CPUID_FEATURE_ECX_OSXSAVE : 0);
963 }
964 else if (uLeaf == 0xb)
965 {
966 /* EDX: Initial extended APIC ID. */
967 AssertMsg(pLeaf->uEdx == 0, ("%#x\n", pLeaf->uEdx)); /* raw-mode assumption */
968 *pEdx = pVCpu->idCpu;
969 Assert(!(pLeaf->fFlags & ~(CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)));
970 }
971 else if (uLeaf == UINT32_C(0x8000001e))
972 {
973 /* EAX: Initial extended APIC ID. */
974 AssertMsg(pLeaf->uEax == 0, ("%#x\n", pLeaf->uEax)); /* raw-mode assumption */
975 *pEax = pVCpu->idCpu;
976 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC_ID));
977 }
978 else if (uLeaf == UINT32_C(0x80000001))
979 {
980 /* EDX: Bit 9: AND with APICBASE.EN. */
981 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible)
982 *pEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
983 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC));
984 }
985 else
986 AssertMsgFailed(("uLeaf=%#x\n", uLeaf));
987 }
988
989 /* Intel CPUs suppress the SYSCALL bit when not executing in 64-bit mode: */
990 if ( uLeaf == UINT32_C(0x80000001)
991 && f64BitMode == false
992 && (*pEdx & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
993 && ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL
994 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_VIA /*?*/
995 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_SHANGHAI /*?*/ ) )
996 *pEdx &= ~X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
997
998 }
999 /*
1000 * Out-of-range sub-leaves aren't quite as easy or pretty to emulate,
1001 * but we do the best we can here...
1002 */
1003 else
1004 {
1005 *pEax = *pEbx = *pEcx = *pEdx = 0;
1006 if (pLeaf->fFlags & CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)
1007 {
1008 *pEcx = uSubLeaf & 0xff;
1009 *pEdx = pVCpu->idCpu;
1010 }
1011 }
1012 }
1013 else
1014 {
1015 /*
1016 * Different CPUs have different ways of dealing with unknown CPUID leaves.
1017 */
1018 switch (pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod)
1019 {
1020 default:
1021 AssertFailed();
1022 RT_FALL_THRU();
1023 case CPUMUNKNOWNCPUID_DEFAULTS:
1024 case CPUMUNKNOWNCPUID_LAST_STD_LEAF: /* ASSUME this is executed */
1025 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: /** @todo Implement CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX */
1026 *pEax = pVM->cpum.s.GuestInfo.DefCpuId.uEax;
1027 *pEbx = pVM->cpum.s.GuestInfo.DefCpuId.uEbx;
1028 *pEcx = pVM->cpum.s.GuestInfo.DefCpuId.uEcx;
1029 *pEdx = pVM->cpum.s.GuestInfo.DefCpuId.uEdx;
1030 break;
1031 case CPUMUNKNOWNCPUID_PASSTHRU:
1032 *pEax = uLeaf;
1033 *pEbx = 0;
1034 *pEcx = uSubLeaf;
1035 *pEdx = 0;
1036 break;
1037 }
1038 }
1039 Log2(("CPUMGetGuestCpuId: uLeaf=%#010x/%#010x %RX32 %RX32 %RX32 %RX32\n", uLeaf, uSubLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1040}
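
/*
 * A hedged usage sketch for CPUMGetGuestCpuId: query leaf 1 / sub-leaf 0 and
 * test a feature bit.  Passing -1 for f64BitMode means "don't know", per the
 * tristate described above.  The helper name is an assumption; the feature
 * constant is the usual definition from iprt's x86.h.
 */
DECLINLINE(bool) cpumExampleGuestHasSse2(PVMCPUCC pVCpu)
{
    uint32_t uEax, uEbx, uEcx, uEdx;
    CPUMGetGuestCpuId(pVCpu, 1 /*uLeaf*/, 0 /*uSubLeaf*/, -1 /*f64BitMode*/, &uEax, &uEbx, &uEcx, &uEdx);
    return RT_BOOL(uEdx & X86_CPUID_FEATURE_EDX_SSE2);
}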
1041
1042
1043/**
1044 * Sets the visibility of the X86_CPUID_FEATURE_EDX_APIC and
1045 * X86_CPUID_AMD_FEATURE_EDX_APIC CPUID bits.
1046 *
1047 * @returns Previous value.
1048 * @param pVCpu The cross context virtual CPU structure to make the
1049 * change on. Usually the calling EMT.
1050 * @param fVisible Whether to make it visible (true) or hide it (false).
1051 *
1052 * @remarks This is "VMMDECL" so that it still links with
1053 * the old APIC code which is in VBoxDD2 and not in
1054 * the VMM module.
1055 */
1056VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible)
1057{
1058 bool fOld = pVCpu->cpum.s.fCpuIdApicFeatureVisible;
1059 pVCpu->cpum.s.fCpuIdApicFeatureVisible = fVisible;
1060 return fOld;
1061}
1062
1063
1064/**
1065 * Gets the host CPU vendor.
1066 *
1067 * @returns CPU vendor.
1068 * @param pVM The cross context VM structure.
1069 */
1070VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1071{
1072 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
1073}
1074
1075
1076/**
1077 * Gets the host CPU microarchitecture.
1078 *
1079 * @returns CPU microarchitecture.
1080 * @param pVM The cross context VM structure.
1081 */
1082VMMDECL(CPUMMICROARCH) CPUMGetHostMicroarch(PCVM pVM)
1083{
1084 return pVM->cpum.s.HostFeatures.enmMicroarch;
1085}
1086
1087
1088/**
1089 * Gets the guest CPU vendor.
1090 *
1091 * @returns CPU vendor.
1092 * @param pVM The cross context VM structure.
1093 */
1094VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1095{
1096 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
1097}
1098
1099
1100/**
1101 * Gets the guest CPU microarchitecture.
1102 *
1103 * @returns CPU microarchitecture.
1104 * @param pVM The cross context VM structure.
1105 */
1106VMMDECL(CPUMMICROARCH) CPUMGetGuestMicroarch(PCVM pVM)
1107{
1108 return pVM->cpum.s.GuestFeatures.enmMicroarch;
1109}
1110
1111
1112/**
1113 * Gets the maximum number of physical and linear address bits supported by the
1114 * guest.
1115 *
1116 * @param pVM The cross context VM structure.
1117 * @param pcPhysAddrWidth Where to store the physical address width.
1118 * @param pcLinearAddrWidth Where to store the linear address width.
1119 */
1120VMMDECL(void) CPUMGetGuestAddrWidths(PCVM pVM, uint8_t *pcPhysAddrWidth, uint8_t *pcLinearAddrWidth)
1121{
1122 AssertPtr(pVM);
1123 AssertReturnVoid(pcPhysAddrWidth);
1124 AssertReturnVoid(pcLinearAddrWidth);
1125 *pcPhysAddrWidth = pVM->cpum.s.GuestFeatures.cMaxPhysAddrWidth;
1126 *pcLinearAddrWidth = pVM->cpum.s.GuestFeatures.cMaxLinearAddrWidth;
1127}
1128
1129
1130VMMDECL(int) CPUMSetGuestDR0(PVMCPUCC pVCpu, uint64_t uDr0)
1131{
1132 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1133 return CPUMRecalcHyperDRx(pVCpu, 0);
1134}
1135
1136
1137VMMDECL(int) CPUMSetGuestDR1(PVMCPUCC pVCpu, uint64_t uDr1)
1138{
1139 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1140 return CPUMRecalcHyperDRx(pVCpu, 1);
1141}
1142
1143
1144VMMDECL(int) CPUMSetGuestDR2(PVMCPUCC pVCpu, uint64_t uDr2)
1145{
1146 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1147 return CPUMRecalcHyperDRx(pVCpu, 2);
1148}
1149
1150
1151VMMDECL(int) CPUMSetGuestDR3(PVMCPUCC pVCpu, uint64_t uDr3)
1152{
1153 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1154 return CPUMRecalcHyperDRx(pVCpu, 3);
1155}
1156
1157
1158VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1159{
1160 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1161 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_DR6;
1162 return VINF_SUCCESS; /* No need to recalc. */
1163}
1164
1165
1166VMMDECL(int) CPUMSetGuestDR7(PVMCPUCC pVCpu, uint64_t uDr7)
1167{
1168 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1169 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_DR7;
1170 return CPUMRecalcHyperDRx(pVCpu, 7);
1171}
1172
1173
1174VMMDECL(int) CPUMSetGuestDRx(PVMCPUCC pVCpu, uint32_t iReg, uint64_t Value)
1175{
1176 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1177 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1178 if (iReg == 4 || iReg == 5)
1179 iReg += 2;
1180 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1181 return CPUMRecalcHyperDRx(pVCpu, iReg);
1182}
1183
1184
1185/**
1186 * Recalculates the hypervisor DRx register values based on current guest
1187 * registers and DBGF breakpoints, updating changed registers depending on the
1188 * context.
1189 *
1190 * This is called whenever a guest DRx register is modified (any context) and
1191 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
1192 *
1193 * In raw-mode context this function will reload any (hyper) DRx registers which
1194 * come out with a different value. It may also have to save the host debug
1195 * registers if that hasn't been done already. In this context though, we'll
1196 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
1197 * are only important when breakpoints are actually enabled.
1198 *
1199 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
1200 * reloaded by the HM code if it changes. Furthermore, we will only use the
1201 * combined register set when the VBox debugger is actually using hardware BPs,
1202 * when it isn't we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
1203 * concern us here).
1204 *
1205 * In ring-3 we won't be loading anything, so we'll calculate hypervisor values
1206 * all the time.
1207 *
1208 * @returns VINF_SUCCESS.
1209 * @param pVCpu The cross context virtual CPU structure.
1210 * @param iGstReg The guest debug register number that was modified.
1211 * UINT8_MAX if not guest register.
1212 */
1213VMMDECL(int) CPUMRecalcHyperDRx(PVMCPUCC pVCpu, uint8_t iGstReg)
1214{
1215 PVM pVM = pVCpu->CTX_SUFF(pVM);
1216#ifndef IN_RING0
1217 RT_NOREF_PV(iGstReg);
1218#endif
1219
1220 /*
1221 * Compare the DR7s first.
1222 *
1223 * We only care about the enabled flags. GD is virtualized when we
1224 * dispatch the #DB, we never enable it. The DBGF DR7 value will
1225 * always have the LE and GE bits set, so no need to check and disable
1226 * stuff if they're cleared like we have to for the guest DR7.
1227 */
1228 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1229 /** @todo This isn't correct. BPs work without setting LE and GE under AMD-V. They are also documented as unsupported by P6+. */
1230 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
1231 uGstDr7 = 0;
1232 else if (!(uGstDr7 & X86_DR7_LE))
1233 uGstDr7 &= ~X86_DR7_LE_ALL;
1234 else if (!(uGstDr7 & X86_DR7_GE))
1235 uGstDr7 &= ~X86_DR7_GE_ALL;
1236
1237 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1238 if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
1239 {
1240 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1241
1242 /*
1243 * Ok, something is enabled. Recalc each of the breakpoints, taking
1244 * the VM debugger ones over the guest ones. In raw-mode context we will
1245 * not allow breakpoints with values inside the hypervisor area.
1246 */
1247 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
1248
1249 /* bp 0 */
1250 RTGCUINTREG uNewDr0;
1251 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1252 {
1253 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1254 uNewDr0 = DBGFBpGetDR0(pVM);
1255 }
1256 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1257 {
1258 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1259 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1260 }
1261 else
1262 uNewDr0 = 0;
1263
1264 /* bp 1 */
1265 RTGCUINTREG uNewDr1;
1266 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1267 {
1268 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1269 uNewDr1 = DBGFBpGetDR1(pVM);
1270 }
1271 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1272 {
1273 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1274 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1275 }
1276 else
1277 uNewDr1 = 0;
1278
1279 /* bp 2 */
1280 RTGCUINTREG uNewDr2;
1281 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1282 {
1283 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1284 uNewDr2 = DBGFBpGetDR2(pVM);
1285 }
1286 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1287 {
1288 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1289 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1290 }
1291 else
1292 uNewDr2 = 0;
1293
1294 /* bp 3 */
1295 RTGCUINTREG uNewDr3;
1296 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1297 {
1298 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1299 uNewDr3 = DBGFBpGetDR3(pVM);
1300 }
1301 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1302 {
1303 uNewDr3 = CPUMGetGuestDR3(pVCpu);
1304 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1305 }
1306 else
1307 uNewDr3 = 0;
1308
1309 /*
1310 * Apply the updates.
1311 */
1312 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
1313 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
1314 CPUMSetHyperDR3(pVCpu, uNewDr3);
1315 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
1316 CPUMSetHyperDR2(pVCpu, uNewDr2);
1317 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
1318 CPUMSetHyperDR1(pVCpu, uNewDr1);
1319 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
1320 CPUMSetHyperDR0(pVCpu, uNewDr0);
1321 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
1322 CPUMSetHyperDR7(pVCpu, uNewDr7);
1323 }
1324#ifdef IN_RING0
1325 else if (CPUMIsGuestDebugStateActive(pVCpu))
1326 {
1327 /*
1328 * Reload the register that was modified. Normally this won't happen
1329 * as we won't intercept DRx writes when not having the hyper debug
1330 * state loaded, but in case we do for some reason we'll simply deal
1331 * with it.
1332 */
1333 switch (iGstReg)
1334 {
1335 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
1336 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
1337 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
1338 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
1339 default:
1340 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
1341 }
1342 }
1343#endif
1344 else
1345 {
1346 /*
1347 * No active debug state any more. In raw-mode this means we have to
1348 * make sure DR7 has everything disabled now, if we armed it already.
1349 * In ring-0 we might end up here when just single stepping.
1350 */
1351#ifdef IN_RING0
1352 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
1353 {
1354 if (pVCpu->cpum.s.Hyper.dr[0])
1355 ASMSetDR0(0);
1356 if (pVCpu->cpum.s.Hyper.dr[1])
1357 ASMSetDR1(0);
1358 if (pVCpu->cpum.s.Hyper.dr[2])
1359 ASMSetDR2(0);
1360 if (pVCpu->cpum.s.Hyper.dr[3])
1361 ASMSetDR3(0);
1362 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
1363 }
1364#endif
1365 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
1366
1367 /* Clear all the registers. */
1368 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
1369 pVCpu->cpum.s.Hyper.dr[3] = 0;
1370 pVCpu->cpum.s.Hyper.dr[2] = 0;
1371 pVCpu->cpum.s.Hyper.dr[1] = 0;
1372 pVCpu->cpum.s.Hyper.dr[0] = 0;
1373
1374 }
1375 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1376 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
1377 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
1378 pVCpu->cpum.s.Hyper.dr[7]));
1379
1380 return VINF_SUCCESS;
1381}
1382
1383
1384/**
1385 * Set the guest XCR0 register.
1386 *
1387 * Will load additional state if the FPU state is already loaded (in ring-0 &
1388 * raw-mode context).
1389 *
1390 * @returns VINF_SUCCESS on success, VERR_CPUM_RAISE_GP_0 on invalid input
1391 * value.
1392 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1393 * @param uNewValue The new value.
1394 * @thread EMT(pVCpu)
1395 */
1396VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPUCC pVCpu, uint64_t uNewValue)
1397{
1398 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_XCRx);
1399 if ( (uNewValue & ~pVCpu->CTX_SUFF(pVM)->cpum.s.fXStateGuestMask) == 0
1400 /* The X87 bit cannot be cleared. */
1401 && (uNewValue & XSAVE_C_X87)
1402 /* AVX requires SSE. */
1403 && (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM
1404 /* AVX-512 requires YMM, SSE and all of its three components to be enabled. */
1405 && ( (uNewValue & (XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI)) == 0
1406 || (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI))
1407 == (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI) )
1408 )
1409 {
1410 pVCpu->cpum.s.Guest.aXcr[0] = uNewValue;
1411
1412 /* If more state components are enabled, we need to take care to load
1413 them if the FPU/SSE state is already loaded. May otherwise leak
1414 host state to the guest. */
1415 uint64_t fNewComponents = ~pVCpu->cpum.s.Guest.fXStateMask & uNewValue;
1416 if (fNewComponents)
1417 {
1418#ifdef IN_RING0
1419 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
1420 {
1421 if (pVCpu->cpum.s.Guest.fXStateMask != 0)
1422 /* Adding more components. */
1423 ASMXRstor(&pVCpu->cpum.s.Guest.XState, fNewComponents);
1424 else
1425 {
1426 /* We're switching from FXSAVE/FXRSTOR to XSAVE/XRSTOR. */
1427 pVCpu->cpum.s.Guest.fXStateMask |= XSAVE_C_X87 | XSAVE_C_SSE;
1428 if (uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE))
1429 ASMXRstor(&pVCpu->cpum.s.Guest.XState, uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE));
1430 }
1431 }
1432#endif
1433 pVCpu->cpum.s.Guest.fXStateMask |= uNewValue;
1434 }
1435 return VINF_SUCCESS;
1436 }
1437 return VERR_CPUM_RAISE_GP_0;
1438}
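
/*
 * A compact restatement of the XCR0 validity rules enforced above, as a pure
 * predicate over the requested value and the permitted component mask: X87 is
 * mandatory, AVX (YMM) needs SSE, and AVX-512 needs YMM, SSE and all three of
 * its own components together.  The helper name is illustrative only.
 */
DECLINLINE(bool) cpumExampleIsXcr0ValueValid(uint64_t uValue, uint64_t fGuestMask)
{
    uint64_t const fAvx512 = XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI;
    return (uValue & ~fGuestMask) == 0
        && (uValue & XSAVE_C_X87)
        && (uValue & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM
        && (   (uValue & fAvx512) == 0
            || (uValue & (XSAVE_C_SSE | XSAVE_C_YMM | fAvx512)) == (XSAVE_C_SSE | XSAVE_C_YMM | fAvx512));
}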
1439
1440
1441/**
1442 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
1443 *
1444 * @returns true if NXE is enabled, otherwise false.
1445 * @param pVCpu The cross context virtual CPU structure.
1446 */
1447VMMDECL(bool) CPUMIsGuestNXEnabled(PCVMCPU pVCpu)
1448{
1449 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
1450 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
1451}
1452
1453
1454/**
1455 * Tests if the guest has the Page Size Extension enabled (PSE).
1456 *
1457 * @returns true if PSE is enabled, otherwise false.
1458 * @param pVCpu The cross context virtual CPU structure.
1459 */
1460VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PCVMCPU pVCpu)
1461{
1462 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
1463 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
1464 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
1465}
1466
1467
1468/**
1469 * Tests if the guest has the paging enabled (PG).
1470 *
1471 * @returns true if paging is enabled, otherwise false.
1472 * @param pVCpu The cross context virtual CPU structure.
1473 */
1474VMMDECL(bool) CPUMIsGuestPagingEnabled(PCVMCPU pVCpu)
1475{
1476 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1477 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
1478}
1479
1480
1481/**
1482 * Tests if the guest has ring-0 write protection enabled (WP).
1483 *
1484 * @returns true if write protection is enabled, otherwise false.
1485 * @param pVCpu The cross context virtual CPU structure.
1486 */
1487VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PCVMCPU pVCpu)
1488{
1489 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1490 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
1491}
1492
1493
1494/**
1495 * Tests if the guest is running in real mode or not.
1496 *
1497 * @returns true if in real mode, otherwise false.
1498 * @param pVCpu The cross context virtual CPU structure.
1499 */
1500VMMDECL(bool) CPUMIsGuestInRealMode(PCVMCPU pVCpu)
1501{
1502 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1503 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1504}
1505
1506
1507/**
1508 * Tests if the guest is running in real or virtual 8086 mode.
1509 *
1510 * @returns @c true if it is, @c false if not.
1511 * @param pVCpu The cross context virtual CPU structure.
1512 */
1513VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PCVMCPU pVCpu)
1514{
1515 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS);
1516 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
1517 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
1518}
1519
1520
1521/**
1522 * Tests if the guest is running in protected mode or not.
1523 *
1524 * @returns true if in protected mode, otherwise false.
1525 * @param pVCpu The cross context virtual CPU structure.
1526 */
1527VMMDECL(bool) CPUMIsGuestInProtectedMode(PCVMCPU pVCpu)
1528{
1529 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1530 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1531}
1532
1533
1534/**
1535 * Tests if the guest is running in paged protected mode or not.
1536 *
1537 * @returns true if in paged protected mode, otherwise false.
1538 * @param pVCpu The cross context virtual CPU structure.
1539 */
1540VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PCVMCPU pVCpu)
1541{
1542 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1543 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1544}
1545
1546
1547/**
1548 * Tests if the guest is running in long mode or not.
1549 *
1550 * @returns true if in long mode, otherwise false.
1551 * @param pVCpu The cross context virtual CPU structure.
1552 */
1553VMMDECL(bool) CPUMIsGuestInLongMode(PCVMCPU pVCpu)
1554{
1555 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
1556 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1557}
1558
1559
1560/**
1561 * Tests if the guest is running in PAE mode or not.
1562 *
1563 * @returns true if in PAE mode, otherwise false.
1564 * @param pVCpu The cross context virtual CPU structure.
1565 */
1566VMMDECL(bool) CPUMIsGuestInPAEMode(PCVMCPU pVCpu)
1567{
1568 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
1569 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
1570 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
1571 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
1572 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
1573 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
1574}
1575
1576
1577/**
1578 * Tests if the guest is running in 64-bit mode or not.
1579 *
1580 * @returns true if in 64-bit protected mode, otherwise false.
1581 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1582 */
1583VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
1584{
1585 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
1586 if (!CPUMIsGuestInLongMode(pVCpu))
1587 return false;
1588 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
1589 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
1590}
1591
1592
1593/**
1594 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
1595 * registers.
1596 *
1597 * @returns true if in 64-bit protected mode, otherwise false.
1598 * @param pCtx Pointer to the current guest CPU context.
1599 */
1600VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
1601{
1602 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
1603}
1604
1605
1606/**
1607 * Sets the specified changed flags (CPUM_CHANGED_*).
1608 *
1609 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1610 * @param fChangedAdd The changed flags to add.
1611 */
1612VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd)
1613{
1614 pVCpu->cpum.s.fChanged |= fChangedAdd;
1615}
1616
1617
1618/**
1619 * Checks if the CPU supports the XSAVE and XRSTOR instruction.
1620 *
1621 * @returns true if supported.
1622 * @returns false if not supported.
1623 * @param pVM The cross context VM structure.
1624 */
1625VMMDECL(bool) CPUMSupportsXSave(PVM pVM)
1626{
1627 return pVM->cpum.s.HostFeatures.fXSaveRstor != 0;
1628}
1629
1630
1631/**
1632 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
1633 * @returns true if used.
1634 * @returns false if not used.
1635 * @param pVM The cross context VM structure.
1636 */
1637VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
1638{
1639 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
1640}
1641
1642
1643/**
1644 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
1645 * @returns true if used.
1646 * @returns false if not used.
1647 * @param pVM The cross context VM structure.
1648 */
1649VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
1650{
1651 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
1652}
1653
1654
1655/**
1656 * Checks if we activated the FPU/XMM state of the guest OS.
1657 *
1658 * Obsolete: This differs from CPUMIsGuestFPUStateLoaded() in that it refers to
1659 * the next time we'll be executing guest code, so it may return true for
1660 * 64-on-32 when we still haven't actually loaded the FPU state, just scheduled
1661 * it to be loaded the next time we go thru the world switcher
1662 * (CPUM_SYNC_FPU_STATE).
1663 *
1664 * @returns true / false.
1665 * @param pVCpu The cross context virtual CPU structure.
1666 */
1667VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
1668{
1669 bool fRet = RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
1670 AssertMsg(fRet == pVCpu->cpum.s.Guest.fUsedFpuGuest, ("fRet=%d\n", fRet));
1671 return fRet;
1672}
1673
1674
1675/**
1676 * Checks if we've really loaded the FPU/XMM state of the guest OS.
1677 *
1678 * @returns true / false.
1679 * @param pVCpu The cross context virtual CPU structure.
1680 */
1681VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu)
1682{
1683 bool fRet = RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
1684 AssertMsg(fRet == pVCpu->cpum.s.Guest.fUsedFpuGuest, ("fRet=%d\n", fRet));
1685 return fRet;
1686}
1687
1688
1689/**
1690 * Checks if we saved the FPU/XMM state of the host OS.
1691 *
1692 * @returns true / false.
1693 * @param pVCpu The cross context virtual CPU structure.
1694 */
1695VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu)
1696{
1697 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_HOST);
1698}
1699
1700
1701/**
1702 * Checks if the guest debug state is active.
1703 *
1704 * @returns boolean
1705 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1706 */
1707VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
1708{
1709 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
1710}
1711
1712
1713/**
1714 * Checks if the hyper debug state is active.
1715 *
1716 * @returns boolean
1717 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1718 */
1719VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
1720{
1721 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
1722}
1723
1724
1725/**
1726 * Mark the guest's debug state as inactive.
1727 *
1728 * @returns boolean
1729 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1730 * @todo This API doesn't make sense any more.
1731 */
1732VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
1733{
1734 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
1735 NOREF(pVCpu);
1736}
1737
1738
1739/**
1740 * Get the current privilege level of the guest.
1741 *
1742 * @returns CPL
1743 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1744 */
1745VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
1746{
1747 /*
1748 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
1749 *
1750 * Note! We used to check CS.DPL here, assuming it was always equal to
1751 * CPL even if a conforming segment was loaded. But this turned out to
1752 * only apply to older AMD-V. With VT-x we had an ACP2 regression
1753 * during install after a far call to ring 2 with VT-x. Then on newer
1754 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
1755 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
1756 *
1757 * So, forget CS.DPL, always use SS.DPL.
1758 *
1759 * Note! The SS RPL is always equal to the CPL, while the CS RPL
1760 * isn't necessarily equal if the segment is conforming.
1761 * See section 4.11.1 in the AMD manual.
1762 *
1763 * Update: Where the heck does it say CS.RPL can differ from CPL other than
1764 * right after real->prot mode switch and when in V8086 mode? That
1765 * section says the RPL specified in a direct transfer (call, jmp,
1766 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
1767 * it would be impossible for an exception handler or the iret
1768 * instruction to figure out whether SS:ESP are part of the frame
1769 * or not. A VBox or qemu bug must've led to this misconception.
1770 *
1771 * Update2: On an AMD bulldozer system here, I've no trouble loading a null
1772 * selector into SS with an RPL other than the CPL when CPL != 3 and
1773 * we're in 64-bit mode. The Intel dev box doesn't allow this; only
1774 * RPL = CPL is accepted. Weird.
1775 */
1776 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
1777 uint32_t uCpl;
1778 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
1779 {
1780 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
1781 {
1782 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
1783 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
1784 else
1785 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
1786 }
1787 else
1788 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
1789 }
1790 else
1791 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
1792 return uCpl;
1793}
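
/*
 * The decision above condensed into a pure function, with the inputs spelled
 * out for illustration (the helper and its parameter names are assumptions):
 * real mode yields CPL 0, virtual-8086 mode yields CPL 3, and otherwise the
 * CPL comes from SS.DPL, falling back to the SS selector's RPL bits when the
 * hidden parts aren't valid.
 */
DECLINLINE(uint32_t) cpumExampleCplFrom(bool fProtMode, bool fV86, bool fSsHiddenValid, uint8_t uSsDpl, uint16_t uSsSel)
{
    if (!fProtMode)
        return 0;
    if (fV86)
        return 3;
    return fSsHiddenValid ? uSsDpl : (uint32_t)(uSsSel & X86_SEL_RPL);
}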
1794
1795
1796/**
1797 * Gets the current guest CPU mode.
1798 *
1799 * If paging mode is what you need, check out PGMGetGuestMode().
1800 *
1801 * @returns The CPU mode.
1802 * @param pVCpu The cross context virtual CPU structure.
1803 */
1804VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
1805{
1806 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
1807 CPUMMODE enmMode;
1808 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
1809 enmMode = CPUMMODE_REAL;
1810 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
1811 enmMode = CPUMMODE_PROTECTED;
1812 else
1813 enmMode = CPUMMODE_LONG;
1814
1815 return enmMode;
1816}
1817
1818
1819/**
1820 * Figure whether the CPU is currently executing 16, 32 or 64 bit code.
1821 *
1822 * @returns 16, 32 or 64.
1823 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1824 */
1825VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
1826{
1827 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);
1828
1829 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
1830 return 16;
1831
1832 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
1833 {
1834 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
1835 return 16;
1836 }
1837
1838 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
1839 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
1840 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
1841 return 64;
1842
1843 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
1844 return 32;
1845
1846 return 16;
1847}
1848
1849
1850VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
1851{
1852 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);
1853
1854 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
1855 return DISCPUMODE_16BIT;
1856
1857 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
1858 {
1859 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
1860 return DISCPUMODE_16BIT;
1861 }
1862
1863 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
1864 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
1865 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
1866 return DISCPUMODE_64BIT;
1867
1868 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
1869 return DISCPUMODE_32BIT;
1870
1871 return DISCPUMODE_16BIT;
1872}
1873
1874
1875/**
1876 * Gets the guest MXCSR_MASK value.
1877 *
1878 * This does not access the x87 state, but returns the value we determined
1879 * at VM initialization.
1880 *
1881 * @returns MXCSR mask.
1882 * @param pVM The cross context VM structure.
1883 */
1884VMMDECL(uint32_t) CPUMGetGuestMxCsrMask(PVM pVM)
1885{
1886 return pVM->cpum.s.GuestInfo.fMxCsrMask;
1887}
1888
1889
1890/**
1891 * Returns whether the guest has physical interrupts enabled.
1892 *
1893 * @returns @c true if interrupts are enabled, @c false otherwise.
1894 * @param pVCpu The cross context virtual CPU structure.
1895 *
1896 * @remarks Warning! This function does -not- take into account the global-interrupt
1897 * flag (GIF).
1898 */
1899VMM_INT_DECL(bool) CPUMIsGuestPhysIntrEnabled(PVMCPU pVCpu)
1900{
1901 switch (CPUMGetGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest))
1902 {
1903 case CPUMHWVIRT_NONE:
1904 default:
1905 return pVCpu->cpum.s.Guest.eflags.Bits.u1IF;
1906 case CPUMHWVIRT_VMX:
1907 return CPUMIsGuestVmxPhysIntrEnabled(&pVCpu->cpum.s.Guest);
1908 case CPUMHWVIRT_SVM:
1909 return CPUMIsGuestSvmPhysIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
1910 }
1911}
1912
1913
1914/**
1915 * Returns whether the nested-guest has virtual interrupts enabled.
1916 *
1917 * @returns @c true if interrupts are enabled, @c false otherwise.
1918 * @param pVCpu The cross context virtual CPU structure.
1919 *
1920 * @remarks Warning! This function does -not- take into account the global-interrupt
1921 * flag (GIF).
1922 */
1923VMM_INT_DECL(bool) CPUMIsGuestVirtIntrEnabled(PVMCPU pVCpu)
1924{
1925 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
1926 Assert(CPUMIsGuestInNestedHwvirtMode(pCtx));
1927
1928 if (CPUMIsGuestInVmxNonRootMode(pCtx))
1929 return CPUMIsGuestVmxVirtIntrEnabled(pCtx);
1930
1931 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
1932 return CPUMIsGuestSvmVirtIntrEnabled(pVCpu, pCtx);
1933}
1934
1935
1936/**
1937 * Calculates the interruptibility of the guest.
1938 *
1939 * @returns Interruptibility level.
1940 * @param pVCpu The cross context virtual CPU structure.
1941 */
1942VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu)
1943{
1944#if 1
1945 /* Global-interrupt flag blocks pretty much everything we care about here. */
1946 if (CPUMGetGuestGif(&pVCpu->cpum.s.Guest))
1947 {
1948 /*
1949 * Physical interrupts are primarily blocked using EFLAGS. However, we cannot access
1950 * it directly here. If and how EFLAGS are used depends on the context (nested-guest
1951 * or raw-mode). Hence we use the function below which handles the details.
1952 */
1953 if ( pVCpu->cpum.s.Guest.fInhibit == 0
1954 || ( !(pVCpu->cpum.s.Guest.fInhibit & CPUMCTX_INHIBIT_NMI)
1955 && pVCpu->cpum.s.Guest.uRipInhibitInt != pVCpu->cpum.s.Guest.rip))
1956 {
1957 /** @todo OPT: this next call should be inlined! */
1958 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
1959 {
1960 /** @todo OPT: type this out as it repeats tests. */
1961 if ( !CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest)
1962 || CPUMIsGuestVirtIntrEnabled(pVCpu))
1963 return CPUMINTERRUPTIBILITY_UNRESTRAINED;
1964
1965 /* Physical interrupts are enabled, but nested-guest virtual interrupts are disabled. */
1966 return CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED;
1967 }
1968 return CPUMINTERRUPTIBILITY_INT_DISABLED;
1969 }
1970
1971 /*
1972 * Blocking the delivery of NMIs during an interrupt shadow is CPU implementation
1973 * specific. Therefore, in practice, we can't deliver an NMI in an interrupt shadow.
1974 * However, there is some uncertainty regarding the converse, i.e. whether
1975 * NMI-blocking until IRET blocks delivery of physical interrupts.
1976 *
1977 * See Intel spec. 25.4.1 "Event Blocking".
1978 */
1979 /** @todo r=bird: The above comment mixes up VMX root-mode and non-root. Section
1980 * 25.4.1 is only applicable to VMX non-root mode. In root mode /
1981 * non-VMX mode, I have not seen any evidence in the Intel manuals that
1982 * NMIs are not blocked when in an interrupt shadow. Section "6.7
1983 * NONMASKABLE INTERRUPT (NMI)" in SDM 3A seems pretty clear to me.
1984 */
1985 if (!(pVCpu->cpum.s.Guest.fInhibit & CPUMCTX_INHIBIT_NMI))
1986 return CPUMINTERRUPTIBILITY_INT_INHIBITED;
1987 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
1988 }
1989 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
1990#else
1991 if (pVCpu->cpum.s.Guest.rflags.Bits.u1IF)
1992 {
1993 if (pVCpu->cpum.s.Guest.hwvirt.fGif)
1994 {
1995 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS | VMCPU_FF_INHIBIT_INTERRUPTS))
1996 return CPUMINTERRUPTIBILITY_UNRESTRAINED;
1997
1998 /** @todo does blocking NMIs mean interrupts are also inhibited? */
1999 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2000 {
2001 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2002 return CPUMINTERRUPTIBILITY_INT_INHIBITED;
2003 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
2004 }
2005 AssertFailed();
2006 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
2007 }
2008 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
2009 }
2010 else
2011 {
2012 if (pVCpu->cpum.s.Guest.hwvirt.fGif)
2013 {
2014 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2015 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
2016 return CPUMINTERRUPTIBILITY_INT_DISABLED;
2017 }
2018 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
2019 }
2020#endif
2021}
2022
2023
2024/**
2025 * Checks whether the SVM nested-guest has physical interrupts enabled.
2026 *
2027 * @returns true if interrupts are enabled, false otherwise.
2028 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2029 * @param pCtx The guest-CPU context.
2030 *
2031 * @remarks This does -not- take into account the global-interrupt flag.
2032 */
2033VMM_INT_DECL(bool) CPUMIsGuestSvmPhysIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2034{
2035 /** @todo Optimization: Avoid this function call and use a pointer to the
2036 * relevant eflags instead (setup during VMRUN instruction emulation). */
2037 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2038
2039 X86EFLAGS fEFlags;
2040 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))
2041 fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
2042 else
2043 fEFlags.u = pCtx->eflags.u;
2044
2045 return fEFlags.Bits.u1IF;
2046}
2047
2048
2049/**
2050 * Checks whether the SVM nested-guest is in a state to receive virtual (set up
2051 * for injection by the VMRUN instruction) interrupts.
2052 *
2053 * @returns @c true if it's ready, @c false otherwise.
2054 *
2055 *
2056 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2057 * @param pCtx The guest-CPU context.
2058 */
2059VMM_INT_DECL(bool) CPUMIsGuestSvmVirtIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2060{
2061 RT_NOREF(pVCpu);
2062 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2063
2064 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.Vmcb.ctrl;
2065 PCSVMINTCTRL pVmcbIntCtrl = &pVmcbCtrl->IntCtrl;
2066 Assert(!pVmcbIntCtrl->n.u1VGifEnable); /* We don't support passing virtual-GIF feature to the guest yet. */
2067 if ( !pVmcbIntCtrl->n.u1IgnoreTPR
2068 && pVmcbIntCtrl->n.u4VIntrPrio <= pVmcbIntCtrl->n.u8VTPR)
2069 return false;
2070
2071 return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
2072}
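/*
 * Worked example (hypothetical values, not taken from this file): with
 * u1IgnoreTPR clear, a virtual interrupt priority (V_INTR_PRIO) of 2 and a
 * virtual TPR (V_TPR) of 5, the priority does not exceed the TPR (2 <= 5),
 * so the function above returns false without even consulting EFLAGS.IF.
 */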
2073
2074
2075/**
2076 * Gets the pending SVM nested-guest interrupt vector.
2077 *
2078 * @returns The nested-guest interrupt to inject.
2079 * @param pCtx The guest-CPU context.
2080 */
2081VMM_INT_DECL(uint8_t) CPUMGetGuestSvmVirtIntrVector(PCCPUMCTX pCtx)
2082{
2083 return pCtx->hwvirt.svm.Vmcb.ctrl.IntCtrl.n.u8VIntrVector;
2084}
2085
2086
2087/**
2088 * Restores the host-state from the host-state save area as part of a \#VMEXIT.
2089 *
2090 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2091 * @param pCtx The guest-CPU context.
2092 */
2093VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PVMCPUCC pVCpu, PCPUMCTX pCtx)
2094{
2095 /*
2096 * Reload the guest's "host state".
2097 */
2098 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2099 pCtx->es = pHostState->es;
2100 pCtx->cs = pHostState->cs;
2101 pCtx->ss = pHostState->ss;
2102 pCtx->ds = pHostState->ds;
2103 pCtx->gdtr = pHostState->gdtr;
2104 pCtx->idtr = pHostState->idtr;
2105 CPUMSetGuestEferMsrNoChecks(pVCpu, pCtx->msrEFER, pHostState->uEferMsr);
2106 CPUMSetGuestCR0(pVCpu, pHostState->uCr0 | X86_CR0_PE);
2107 pCtx->cr3 = pHostState->uCr3;
2108 CPUMSetGuestCR4(pVCpu, pHostState->uCr4);
2109 pCtx->rflags.u = pHostState->rflags.u;
2110 pCtx->rflags.Bits.u1VM = 0;
2111 pCtx->rip = pHostState->uRip;
2112 pCtx->rsp = pHostState->uRsp;
2113 pCtx->rax = pHostState->uRax;
2114 pCtx->dr[7] &= ~(X86_DR7_ENABLED_MASK | X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
2115 pCtx->dr[7] |= X86_DR7_RA1_MASK;
2116 Assert(pCtx->ss.Attr.n.u2Dpl == 0);
2117
2118 /** @todo if RIP is not canonical or outside the CS segment limit, we need to
2119 * raise \#GP(0) in the guest. */
2120
2121 /** @todo check the loaded host-state for consistency. Figure out what
2122 * exactly this involves? */
2123}
2124
2125
2126/**
2127 * Saves the host-state to the host-state save area as part of a VMRUN.
2128 *
2129 * @param pCtx The guest-CPU context.
2130 * @param cbInstr The length of the VMRUN instruction in bytes.
2131 */
2132VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr)
2133{
2134 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2135 pHostState->es = pCtx->es;
2136 pHostState->cs = pCtx->cs;
2137 pHostState->ss = pCtx->ss;
2138 pHostState->ds = pCtx->ds;
2139 pHostState->gdtr = pCtx->gdtr;
2140 pHostState->idtr = pCtx->idtr;
2141 pHostState->uEferMsr = pCtx->msrEFER;
2142 pHostState->uCr0 = pCtx->cr0;
2143 pHostState->uCr3 = pCtx->cr3;
2144 pHostState->uCr4 = pCtx->cr4;
2145 pHostState->rflags.u = pCtx->rflags.u;
2146 pHostState->uRip = pCtx->rip + cbInstr;
2147 pHostState->uRsp = pCtx->rsp;
2148 pHostState->uRax = pCtx->rax;
2149}
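/*
 * Usage sketch (illustrative only, not a call sequence lifted from this file):
 * VMRUN emulation would typically save the outer guest's "host state" first and
 * restore it again on #VMEXIT, roughly:
 *
 *     CPUMSvmVmRunSaveHostState(pCtx, cbInstr);      // on VMRUN
 *     ...run the nested-guest...
 *     CPUMSvmVmExitRestoreHostState(pVCpu, pCtx);    // on #VMEXIT
 */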
2150
2151
2152/**
2153 * Applies the TSC offset of a nested-guest if any and returns the TSC value for the
2154 * nested-guest.
2155 *
2156 * @returns The TSC offset after applying any nested-guest TSC offset.
2157 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2158 * @param uTscValue The guest TSC.
2159 *
2160 * @sa CPUMRemoveNestedGuestTscOffset.
2161 */
2162VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue)
2163{
2164 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2165 if (CPUMIsGuestInVmxNonRootMode(pCtx))
2166 {
2167 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
2168 return uTscValue + pCtx->hwvirt.vmx.Vmcs.u64TscOffset.u;
2169 return uTscValue;
2170 }
2171
2172 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
2173 {
2174 uint64_t offTsc;
2175 if (!HMGetGuestSvmTscOffset(pVCpu, &offTsc))
2176 offTsc = pCtx->hwvirt.svm.Vmcb.ctrl.u64TSCOffset;
2177 return uTscValue + offTsc;
2178 }
2179 return uTscValue;
2180}
2181
2182
2183/**
2184 * Removes the TSC offset of a nested-guest if any and returns the TSC value for the
2185 * guest.
2186 *
2187 * @returns The TSC offset after removing any nested-guest TSC offset.
2188 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2189 * @param uTscValue The nested-guest TSC.
2190 *
2191 * @sa CPUMApplyNestedGuestTscOffset.
2192 */
2193VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue)
2194{
2195 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2196 if (CPUMIsGuestInVmxNonRootMode(pCtx))
2197 {
2198 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
2199 return uTscValue - pCtx->hwvirt.vmx.Vmcs.u64TscOffset.u;
2200 return uTscValue;
2201 }
2202
2203 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
2204 {
2205 uint64_t offTsc;
2206 if (!HMGetGuestSvmTscOffset(pVCpu, &offTsc))
2207 offTsc = pCtx->hwvirt.svm.Vmcb.ctrl.u64TSCOffset;
2208 return uTscValue - offTsc;
2209 }
2210 return uTscValue;
2211}
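/*
 * Illustrative property (assuming the nested-guest TSC offset does not change
 * in between): the two helpers above are inverses of each other, i.e.
 *
 *     uTsc == CPUMRemoveNestedGuestTscOffset(pVCpu,
 *                 CPUMApplyNestedGuestTscOffset(pVCpu, uTsc));
 *
 * so a caller can convert between guest and nested-guest TSC values in either
 * direction.
 */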
2212
2213
2214/**
2215 * Used to dynamically import state residing in NEM or HM.
2216 *
2217 * This is a worker for the CPUM_IMPORT_EXTRN_RET() macro and various IEM ones.
2218 *
2219 * @returns VBox status code.
2220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2221 * @param fExtrnImport The fields to import.
2222 * @thread EMT(pVCpu)
2223 */
2224VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPUCC pVCpu, uint64_t fExtrnImport)
2225{
2226 VMCPU_ASSERT_EMT(pVCpu);
2227 if (pVCpu->cpum.s.Guest.fExtrn & fExtrnImport)
2228 {
2229 switch (pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_KEEPER_MASK)
2230 {
2231 case CPUMCTX_EXTRN_KEEPER_NEM:
2232 {
2233 int rc = NEMImportStateOnDemand(pVCpu, fExtrnImport);
2234 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
2235 return rc;
2236 }
2237
2238 case CPUMCTX_EXTRN_KEEPER_HM:
2239 {
2240#ifdef IN_RING0
2241 int rc = HMR0ImportStateOnDemand(pVCpu, fExtrnImport);
2242 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
2243 return rc;
2244#else
2245 AssertLogRelMsgFailed(("TODO Fetch HM state: %#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport));
2246 return VINF_SUCCESS;
2247#endif
2248 }
2249 default:
2250 AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
2251 }
2252 }
2253 return VINF_SUCCESS;
2254}
2255
2256
2257/**
2258 * Gets valid CR4 bits for the guest.
2259 *
2260 * @returns Valid CR4 bits.
2261 * @param pVM The cross context VM structure.
2262 */
2263VMM_INT_DECL(uint64_t) CPUMGetGuestCR4ValidMask(PVM pVM)
2264{
2265 PCCPUMFEATURES pGuestFeatures = &pVM->cpum.s.GuestFeatures;
2266 uint64_t fMask = X86_CR4_VME | X86_CR4_PVI
2267 | X86_CR4_TSD | X86_CR4_DE
2268 | X86_CR4_MCE | X86_CR4_PCE;
2269 if (pGuestFeatures->fPae)
2270 fMask |= X86_CR4_PAE;
2271 if (pGuestFeatures->fPge)
2272 fMask |= X86_CR4_PGE;
2273 if (pGuestFeatures->fPse)
2274 fMask |= X86_CR4_PSE;
2275 if (pGuestFeatures->fFxSaveRstor)
2276 fMask |= X86_CR4_OSFXSR;
2277 if (pGuestFeatures->fVmx)
2278 fMask |= X86_CR4_VMXE;
2279 if (pGuestFeatures->fXSaveRstor)
2280 fMask |= X86_CR4_OSXSAVE;
2281 if (pGuestFeatures->fPcid)
2282 fMask |= X86_CR4_PCIDE;
2283 if (pGuestFeatures->fFsGsBase)
2284 fMask |= X86_CR4_FSGSBASE;
2285 if (pGuestFeatures->fSse)
2286 fMask |= X86_CR4_OSXMMEEXCPT;
2287 return fMask;
2288}
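/*
 * Minimal caller sketch (illustrative; uNewCr4 is a hypothetical value the
 * guest is writing): any bit outside the returned mask is reserved for this
 * guest configuration, so a MOV-to-CR4 setting one of them would have to fail
 * with #GP(0), e.g.:
 *
 *     if (uNewCr4 & ~CPUMGetGuestCR4ValidMask(pVM))
 *         return VERR_CPUM_RAISE_GP_0;   // reserved bit set
 */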
2289
2290
2291/**
2292 * Sets the PAE PDPEs for the guest.
2293 *
2294 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2295 * @param paPaePdpes The PAE PDPEs to set.
2296 */
2297VMM_INT_DECL(void) CPUMSetGuestPaePdpes(PVMCPU pVCpu, PCX86PDPE paPaePdpes)
2298{
2299 Assert(paPaePdpes);
2300 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->cpum.s.Guest.aPaePdpes); i++)
2301 pVCpu->cpum.s.Guest.aPaePdpes[i].u = paPaePdpes[i].u;
2302 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR3;
2303}
2304
2305
2306/**
2307 * Gets the PAE PDPTEs for the guest.
2308 *
2309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2310 * @param paPaePdpes Where to store the PAE PDPEs.
2311 */
2312VMM_INT_DECL(void) CPUMGetGuestPaePdpes(PVMCPU pVCpu, PX86PDPE paPaePdpes)
2313{
2314 Assert(paPaePdpes);
2315 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
2316 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->cpum.s.Guest.aPaePdpes); i++)
2317 paPaePdpes[i].u = pVCpu->cpum.s.Guest.aPaePdpes[i].u;
2318}
2319
2320
2321/**
2322 * Starts a VMX-preemption timer to expire as specified by the nested hypervisor.
2323 *
2324 * @returns VBox status code.
2325 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2326 * @param uTimer The VMCS preemption timer value.
2327 * @param cShift The VMX-preemption timer shift (usually based on guest
2328 * VMX MSR rate).
2329 * @param pu64EntryTick Where to store the current tick when the timer is
2330 * programmed.
2331 * @thread EMT(pVCpu)
2332 */
2333VMM_INT_DECL(int) CPUMStartGuestVmxPremptTimer(PVMCPUCC pVCpu, uint32_t uTimer, uint8_t cShift, uint64_t *pu64EntryTick)
2334{
2335 Assert(uTimer);
2336 Assert(cShift <= 31);
2337 Assert(pu64EntryTick);
2338 VMCPU_ASSERT_EMT(pVCpu);
2339 uint64_t const cTicksToNext = uTimer << cShift;
2340 return TMTimerSetRelative(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.s.hNestedVmxPreemptTimer, cTicksToNext, pu64EntryTick);
2341}
2342
2343
2344/**
2345 * Stops the VMX-preemption timer from firing.
2346 *
2347 * @returns VBox status code.
2348 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2349 * @thread EMT.
2350 *
2351 * @remarks This can be called during VM reset, so we cannot assume it will be on
2352 * the EMT corresponding to @c pVCpu.
2353 */
2354VMM_INT_DECL(int) CPUMStopGuestVmxPremptTimer(PVMCPUCC pVCpu)
2355{
2356 /*
2357 * CPUM gets initialized before TM, so we defer creation of timers till CPUMR3InitCompleted().
2358 * However, we still get called during CPUMR3Init() and hence we need to check if we have
2359 * a valid timer object before trying to stop it.
2360 */
2361 int rc;
2362 TMTIMERHANDLE hTimer = pVCpu->cpum.s.hNestedVmxPreemptTimer;
2363 if (hTimer != NIL_TMTIMERHANDLE)
2364 {
2365 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2366 rc = TMTimerLock(pVM, hTimer, VERR_IGNORED);
2367 if (rc == VINF_SUCCESS)
2368 {
2369 if (TMTimerIsActive(pVM, hTimer))
2370 TMTimerStop(pVM, hTimer);
2371 TMTimerUnlock(pVM, hTimer);
2372 }
2373 }
2374 else
2375 rc = VERR_NOT_FOUND;
2376 return rc;
2377}
2378
2379
2380/**
2381 * Gets the read and write permission bits for an MSR in an MSR bitmap.
2382 *
2383 * @returns VMXMSRPM_XXX - the MSR permission.
2384 * @param pvMsrBitmap Pointer to the MSR bitmap.
2385 * @param idMsr The MSR to get permissions for.
2386 *
2387 * @sa hmR0VmxSetMsrPermission.
2388 */
2389VMM_INT_DECL(uint32_t) CPUMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr)
2390{
2391 AssertPtrReturn(pvMsrBitmap, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR);
2392
2393 uint8_t const * const pbMsrBitmap = (uint8_t const * const)pvMsrBitmap;
2394
2395 /*
2396 * MSR Layout:
2397 * Byte index MSR range Interpreted as
2398 * 0x000 - 0x3ff 0x00000000 - 0x00001fff Low MSR read bits.
2399 * 0x400 - 0x7ff 0xc0000000 - 0xc0001fff High MSR read bits.
2400 * 0x800 - 0xbff 0x00000000 - 0x00001fff Low MSR write bits.
2401 * 0xc00 - 0xfff 0xc0000000 - 0xc0001fff High MSR write bits.
2402 *
2403 * A bit corresponding to an MSR within the above range causes a VM-exit
2404 * if the bit is 1 on executions of RDMSR/WRMSR. If an MSR falls outside
2405 * these ranges, it always causes a VM-exit.
2406 *
2407 * See Intel spec. 24.6.9 "MSR-Bitmap Address".
2408 */
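 /* Worked example (for illustration): for idMsr 0xc0000080 the high range
    applies, so offMsr = 0x400 and iBit = 0x80; the read bit then lives at
    byte 0x400 + (0x80 >> 3) = 0x410, bit 0, and the write bit at byte
    0xc10, bit 0. */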
2409 uint32_t const offBitmapRead = 0;
2410 uint32_t const offBitmapWrite = 0x800;
2411 uint32_t offMsr;
2412 uint32_t iBit;
2413 if (idMsr <= UINT32_C(0x00001fff))
2414 {
2415 offMsr = 0;
2416 iBit = idMsr;
2417 }
2418 else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
2419 {
2420 offMsr = 0x400;
2421 iBit = idMsr - UINT32_C(0xc0000000);
2422 }
2423 else
2424 {
2425 LogFunc(("Warning! Out of range MSR %#RX32\n", idMsr));
2426 return VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR;
2427 }
2428
2429 /*
2430 * Get the MSR read permissions.
2431 */
2432 uint32_t fRet;
2433 uint32_t const offMsrRead = offBitmapRead + offMsr;
2434 Assert(offMsrRead + (iBit >> 3) < offBitmapWrite);
2435 if (ASMBitTest(pbMsrBitmap, (offMsrRead << 3) + iBit))
2436 fRet = VMXMSRPM_EXIT_RD;
2437 else
2438 fRet = VMXMSRPM_ALLOW_RD;
2439
2440 /*
2441 * Get the MSR write permissions.
2442 */
2443 uint32_t const offMsrWrite = offBitmapWrite + offMsr;
2444 Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE);
2445 if (ASMBitTest(pbMsrBitmap, (offMsrWrite << 3) + iBit))
2446 fRet |= VMXMSRPM_EXIT_WR;
2447 else
2448 fRet |= VMXMSRPM_ALLOW_WR;
2449
2450 Assert(VMXMSRPM_IS_FLAG_VALID(fRet));
2451 return fRet;
2452}
2453
2454
2455/**
2456 * Checks the permission bits for the specified I/O port from the given I/O bitmap
2457 * to see if it causes a VM-exit.
2458 *
2459 * @returns @c true if the I/O port access must cause a VM-exit, @c false otherwise.
2460 * @param pbIoBitmap Pointer to I/O bitmap.
2461 * @param uPort The I/O port being accessed.
2462 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
2463 */
2464static bool cpumGetVmxIoBitmapPermission(uint8_t const *pbIoBitmap, uint16_t uPort, uint8_t cbAccess)
2465{
2466 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
2467
2468 /*
2469 * If the I/O port access wraps around the 16-bit port I/O space, we must cause a
2470 * VM-exit.
2471 *
2472 * Reading 1, 2, 4 bytes at ports 0xffff, 0xfffe and 0xfffc are valid and do not
2473 * constitute a wrap around. However, reading 2 bytes at port 0xffff or 4 bytes
2474 * from port 0xffff/0xfffe/0xfffd constitute a wrap around. In other words, any
2475 * access to -both- ports 0xffff and port 0 is a wrap around.
2476 *
2477 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2478 */
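 /* For example: a 2-byte access at port 0xffff gives uPortLast = 0x10001,
    which exceeds 0x10000 and therefore always causes a VM-exit, while a
    1-byte access at 0xffff (uPortLast = 0x10000) is checked against the
    bitmap normally. */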
2479 uint32_t const uPortLast = uPort + cbAccess;
2480 if (uPortLast > 0x10000)
2481 return true;
2482
2483 /*
2484 * If any bit corresponding to the I/O access is set, we must cause a VM-exit.
2485 */
2486 uint16_t const offPerm = uPort >> 3; /* Byte offset of the port. */
2487 uint16_t const idxPermBit = uPort - (offPerm << 3); /* Bit offset within byte. */
2488 Assert(idxPermBit < 8);
2489 static const uint8_t s_afMask[] = { 0x0, 0x1, 0x3, 0x7, 0xf }; /* Bit-mask for all access sizes. */
2490 uint16_t const fMask = s_afMask[cbAccess] << idxPermBit; /* Bit-mask of the access. */
2491
2492 /* Fetch 8 or 16-bits depending on whether the access spans 8-bit boundary. */
2493 RTUINT16U uPerm;
2494 uPerm.s.Lo = pbIoBitmap[offPerm];
2495 if (idxPermBit + cbAccess > 8)
2496 uPerm.s.Hi = pbIoBitmap[offPerm + 1];
2497 else
2498 uPerm.s.Hi = 0;
2499
2500 /* If any bit for the access is 1, we must cause a VM-exit. */
2501 if (uPerm.u & fMask)
2502 return true;
2503
2504 return false;
2505}
2506
2507
2508/**
2509 * Returns whether the given VMCS field is valid and supported for the guest.
2510 *
2511 * @param pVM The cross context VM structure.
2512 * @param u64VmcsField The VMCS field.
2513 *
2514 * @remarks This takes into account the CPU features exposed to the guest.
2515 */
2516VMM_INT_DECL(bool) CPUMIsGuestVmxVmcsFieldValid(PVMCC pVM, uint64_t u64VmcsField)
2517{
2518 uint32_t const uFieldEncHi = RT_HI_U32(u64VmcsField);
2519 uint32_t const uFieldEncLo = RT_LO_U32(u64VmcsField);
2520 if (!uFieldEncHi)
2521 { /* likely */ }
2522 else
2523 return false;
2524
2525 PCCPUMFEATURES pFeat = &pVM->cpum.s.GuestFeatures;
2526 switch (uFieldEncLo)
2527 {
2528 /*
2529 * 16-bit fields.
2530 */
2531 /* Control fields. */
2532 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
2533 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
2534 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
2535
2536 /* Guest-state fields. */
2537 case VMX_VMCS16_GUEST_ES_SEL:
2538 case VMX_VMCS16_GUEST_CS_SEL:
2539 case VMX_VMCS16_GUEST_SS_SEL:
2540 case VMX_VMCS16_GUEST_DS_SEL:
2541 case VMX_VMCS16_GUEST_FS_SEL:
2542 case VMX_VMCS16_GUEST_GS_SEL:
2543 case VMX_VMCS16_GUEST_LDTR_SEL:
2544 case VMX_VMCS16_GUEST_TR_SEL: return true;
2545 case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
2546 case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
2547
2548 /* Host-state fields. */
2549 case VMX_VMCS16_HOST_ES_SEL:
2550 case VMX_VMCS16_HOST_CS_SEL:
2551 case VMX_VMCS16_HOST_SS_SEL:
2552 case VMX_VMCS16_HOST_DS_SEL:
2553 case VMX_VMCS16_HOST_FS_SEL:
2554 case VMX_VMCS16_HOST_GS_SEL:
2555 case VMX_VMCS16_HOST_TR_SEL: return true;
2556
2557 /*
2558 * 64-bit fields.
2559 */
2560 /* Control fields. */
2561 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
2562 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
2563 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
2564 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
2565 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
2566 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
2567 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
2568 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
2569 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
2570 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
2571 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
2572 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
2573 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
2574 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
2575 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
2576 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
2577 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
2578 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
2579 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
2580 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
2581 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
2582 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
2583 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
2584 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
2585 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
2586 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
2587 case VMX_VMCS64_CTRL_EPTP_FULL:
2588 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
2589 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
2590 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
2591 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
2592 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
2593 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
2594 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
2595 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
2596 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
2597 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
2598 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
2599 {
2600 PCVMCPU pVCpu = pVM->CTX_SUFF(apCpus)[0];
2601 uint64_t const uVmFuncMsr = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64VmFunc;
2602 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
2603 }
2604 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
2605 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
2606 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
2607 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
2608 case VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL:
2609 case VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
2610 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
2611 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
2612 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
2613 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
2614 case VMX_VMCS64_CTRL_PROC_EXEC3_FULL:
2615 case VMX_VMCS64_CTRL_PROC_EXEC3_HIGH: return pFeat->fVmxTertiaryExecCtls;
2616
2617 /* Read-only data fields. */
2618 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
2619 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
2620
2621 /* Guest-state fields. */
2622 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
2623 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
2624 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
2625 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
2626 case VMX_VMCS64_GUEST_PAT_FULL:
2627 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
2628 case VMX_VMCS64_GUEST_EFER_FULL:
2629 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
2630 case VMX_VMCS64_GUEST_PDPTE0_FULL:
2631 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
2632 case VMX_VMCS64_GUEST_PDPTE1_FULL:
2633 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
2634 case VMX_VMCS64_GUEST_PDPTE2_FULL:
2635 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
2636 case VMX_VMCS64_GUEST_PDPTE3_FULL:
2637 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
2638
2639 /* Host-state fields. */
2640 case VMX_VMCS64_HOST_PAT_FULL:
2641 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
2642 case VMX_VMCS64_HOST_EFER_FULL:
2643 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
2644
2645 /*
2646 * 32-bit fields.
2647 */
2648 /* Control fields. */
2649 case VMX_VMCS32_CTRL_PIN_EXEC:
2650 case VMX_VMCS32_CTRL_PROC_EXEC:
2651 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
2652 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
2653 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
2654 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
2655 case VMX_VMCS32_CTRL_EXIT:
2656 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
2657 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
2658 case VMX_VMCS32_CTRL_ENTRY:
2659 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
2660 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
2661 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
2662 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
2663 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
2664 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
2665 case VMX_VMCS32_CTRL_PLE_GAP:
2666 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
2667
2668 /* Read-only data fields. */
2669 case VMX_VMCS32_RO_VM_INSTR_ERROR:
2670 case VMX_VMCS32_RO_EXIT_REASON:
2671 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
2672 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
2673 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
2674 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
2675 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
2676 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
2677
2678 /* Guest-state fields. */
2679 case VMX_VMCS32_GUEST_ES_LIMIT:
2680 case VMX_VMCS32_GUEST_CS_LIMIT:
2681 case VMX_VMCS32_GUEST_SS_LIMIT:
2682 case VMX_VMCS32_GUEST_DS_LIMIT:
2683 case VMX_VMCS32_GUEST_FS_LIMIT:
2684 case VMX_VMCS32_GUEST_GS_LIMIT:
2685 case VMX_VMCS32_GUEST_LDTR_LIMIT:
2686 case VMX_VMCS32_GUEST_TR_LIMIT:
2687 case VMX_VMCS32_GUEST_GDTR_LIMIT:
2688 case VMX_VMCS32_GUEST_IDTR_LIMIT:
2689 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
2690 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
2691 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
2692 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
2693 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
2694 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
2695 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
2696 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
2697 case VMX_VMCS32_GUEST_INT_STATE:
2698 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
2699 case VMX_VMCS32_GUEST_SMBASE:
2700 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
2701 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
2702
2703 /* Host-state fields. */
2704 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
2705
2706 /*
2707 * Natural-width fields.
2708 */
2709 /* Control fields. */
2710 case VMX_VMCS_CTRL_CR0_MASK:
2711 case VMX_VMCS_CTRL_CR4_MASK:
2712 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
2713 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
2714 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
2715 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
2716 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
2717 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
2718
2719 /* Read-only data fields. */
2720 case VMX_VMCS_RO_EXIT_QUALIFICATION:
2721 case VMX_VMCS_RO_IO_RCX:
2722 case VMX_VMCS_RO_IO_RSI:
2723 case VMX_VMCS_RO_IO_RDI:
2724 case VMX_VMCS_RO_IO_RIP:
2725 case VMX_VMCS_RO_GUEST_LINEAR_ADDR: return true;
2726
2727 /* Guest-state fields. */
2728 case VMX_VMCS_GUEST_CR0:
2729 case VMX_VMCS_GUEST_CR3:
2730 case VMX_VMCS_GUEST_CR4:
2731 case VMX_VMCS_GUEST_ES_BASE:
2732 case VMX_VMCS_GUEST_CS_BASE:
2733 case VMX_VMCS_GUEST_SS_BASE:
2734 case VMX_VMCS_GUEST_DS_BASE:
2735 case VMX_VMCS_GUEST_FS_BASE:
2736 case VMX_VMCS_GUEST_GS_BASE:
2737 case VMX_VMCS_GUEST_LDTR_BASE:
2738 case VMX_VMCS_GUEST_TR_BASE:
2739 case VMX_VMCS_GUEST_GDTR_BASE:
2740 case VMX_VMCS_GUEST_IDTR_BASE:
2741 case VMX_VMCS_GUEST_DR7:
2742 case VMX_VMCS_GUEST_RSP:
2743 case VMX_VMCS_GUEST_RIP:
2744 case VMX_VMCS_GUEST_RFLAGS:
2745 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
2746 case VMX_VMCS_GUEST_SYSENTER_ESP:
2747 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
2748
2749 /* Host-state fields. */
2750 case VMX_VMCS_HOST_CR0:
2751 case VMX_VMCS_HOST_CR3:
2752 case VMX_VMCS_HOST_CR4:
2753 case VMX_VMCS_HOST_FS_BASE:
2754 case VMX_VMCS_HOST_GS_BASE:
2755 case VMX_VMCS_HOST_TR_BASE:
2756 case VMX_VMCS_HOST_GDTR_BASE:
2757 case VMX_VMCS_HOST_IDTR_BASE:
2758 case VMX_VMCS_HOST_SYSENTER_ESP:
2759 case VMX_VMCS_HOST_SYSENTER_EIP:
2760 case VMX_VMCS_HOST_RSP:
2761 case VMX_VMCS_HOST_RIP: return true;
2762 }
2763
2764 return false;
2765}
2766
2767
2768/**
2769 * Checks whether the given I/O access should cause a nested-guest VM-exit.
2770 *
2771 * @returns @c true if it causes a VM-exit, @c false otherwise.
2772 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2773 * @param u16Port The I/O port being accessed.
2774 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
2775 */
2776VMM_INT_DECL(bool) CPUMIsGuestVmxIoInterceptSet(PCVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess)
2777{
2778 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2779 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_UNCOND_IO_EXIT))
2780 return true;
2781
2782 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_IO_BITMAPS))
2783 return cpumGetVmxIoBitmapPermission(pCtx->hwvirt.vmx.abIoBitmap, u16Port, cbAccess);
2784
2785 return false;
2786}
2787
2788
2789/**
2790 * Checks whether the Mov-to-CR3 instruction causes a nested-guest VM-exit.
2791 *
2792 * @returns @c true if it causes a VM-exit, @c false otherwise.
2793 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2794 * @param uNewCr3 The CR3 value being written.
2795 */
2796VMM_INT_DECL(bool) CPUMIsGuestVmxMovToCr3InterceptSet(PVMCPU pVCpu, uint64_t uNewCr3)
2797{
2798 /*
2799 * If the CR3-load exiting control is set and the new CR3 value does not
2800 * match any of the CR3-target values in the VMCS, we must cause a VM-exit.
2801 *
2802 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2803 */
2804 PCCPUMCTX const pCtx = &pVCpu->cpum.s.Guest;
2805 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_CR3_LOAD_EXIT))
2806 {
2807 uint32_t const uCr3TargetCount = pCtx->hwvirt.vmx.Vmcs.u32Cr3TargetCount;
2808 Assert(uCr3TargetCount <= VMX_V_CR3_TARGET_COUNT);
2809
2810 /* If the CR3-target count is 0, cause a VM-exit. */
2811 if (uCr3TargetCount == 0)
2812 return true;
2813
2814 /* If the CR3 being written doesn't match any of the target values, cause a VM-exit. */
2815 AssertCompile(VMX_V_CR3_TARGET_COUNT == 4);
2816 if ( uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target0.u
2817 && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target1.u
2818 && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target2.u
2819 && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target3.u)
2820 return true;
2821 }
2822 return false;
2823}
2824
2825
2826/**
2827 * Checks whether a VMREAD or VMWRITE instruction for the given VMCS field causes a
2828 * VM-exit or not.
2829 *
2830 * @returns @c true if the VMREAD/VMWRITE is intercepted, @c false otherwise.
2831 * @param pVCpu The cross context virtual CPU structure.
2832 * @param uExitReason The VM-exit reason (VMX_EXIT_VMREAD or
2833 * VMX_EXIT_VMWRITE).
2834 * @param u64VmcsField The VMCS field.
2835 */
2836VMM_INT_DECL(bool) CPUMIsGuestVmxVmreadVmwriteInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint64_t u64VmcsField)
2837{
2838 Assert(CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest));
2839 Assert( uExitReason == VMX_EXIT_VMREAD
2840 || uExitReason == VMX_EXIT_VMWRITE);
2841
2842 /*
2843 * Without VMCS shadowing, all VMREAD and VMWRITE instructions are intercepted.
2844 */
2845 if (!CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.s.Guest, VMX_PROC_CTLS2_VMCS_SHADOWING))
2846 return true;
2847
2848 /*
2849 * If any reserved bit in the 64-bit VMCS field encoding is set, the VMREAD/VMWRITE
2850 * is intercepted. This excludes any reserved bits in the valid parts of the field
2851 * encoding (i.e. bit 12).
2852 */
2853 if (u64VmcsField & VMX_VMCSFIELD_RSVD_MASK)
2854 return true;
2855
2856 /*
2857 * Finally, consult the VMREAD/VMWRITE bitmap whether to intercept the instruction or not.
2858 */
2859 uint32_t const u32VmcsField = RT_LO_U32(u64VmcsField);
2860 uint8_t const * const pbBitmap = uExitReason == VMX_EXIT_VMREAD
2861 ? &pVCpu->cpum.s.Guest.hwvirt.vmx.abVmreadBitmap[0]
2862 : &pVCpu->cpum.s.Guest.hwvirt.vmx.abVmwriteBitmap[0];
2863 Assert(pbBitmap);
2864 Assert(u32VmcsField >> 3 < VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
2865 return ASMBitTest(pbBitmap, u32VmcsField); /* One bit per field encoding: byte (enc >> 3), bit (enc & 7). */
2866}
2867
2868
2869
2870/**
2871 * Determines whether the given I/O access should cause a nested-guest \#VMEXIT.
2872 *
2873 * @param pvIoBitmap Pointer to the nested-guest IO bitmap.
2874 * @param u16Port The IO port being accessed.
2875 * @param enmIoType The type of IO access.
2876 * @param cbReg The IO operand size in bytes.
2877 * @param cAddrSizeBits The address size bits (for 16, 32 or 64).
2878 * @param iEffSeg The effective segment number.
2879 * @param fRep Whether this is a repeating IO instruction (REP prefix).
2880 * @param fStrIo Whether this is a string IO instruction.
2881 * @param pIoExitInfo Pointer to the SVMIOIOEXITINFO struct to be filled.
2882 * Optional, can be NULL.
2883 */
2884VMM_INT_DECL(bool) CPUMIsSvmIoInterceptSet(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
2885 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
2886 PSVMIOIOEXITINFO pIoExitInfo)
2887{
2888 Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
2889 Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
2890
2891 /*
2892 * The IOPM layout:
2893 * Each bit represents one 8-bit port. That makes a total of 0..65535 bits or
2894 * two 4K pages.
2895 *
2896 * For IO instructions that access more than a single byte, the permission bits
2897 * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.
2898 *
2899 * Since it's possible to do a 32-bit IO access at port 65534 (accessing 4 bytes),
2900 * we need 3 extra bits beyond the second 4K page.
2901 */
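 /* For example (hypothetical port): u16Port = 0x3f9 gives offIopm = 0x7f and
    cShift = 1, so the first permission bit tested is bit 1 of IOPM byte 0x7f. */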
2902 static const uint16_t s_auSizeMasks[] = { 0, 1, 3, 0, 0xf, 0, 0, 0 };
2903
2904 uint16_t const offIopm = u16Port >> 3;
2905 uint16_t const fSizeMask = s_auSizeMasks[(cAddrSizeBits >> SVM_IOIO_OP_SIZE_SHIFT) & 7];
2906 uint8_t const cShift = u16Port - (offIopm << 3);
2907 uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift);
2908
2909 uint8_t const *pbIopm = (uint8_t *)pvIoBitmap;
2910 Assert(pbIopm);
2911 pbIopm += offIopm;
2912 uint16_t const u16Iopm = *(uint16_t *)pbIopm;
2913 if (u16Iopm & fIopmMask)
2914 {
2915 if (pIoExitInfo)
2916 {
2917 static const uint32_t s_auIoOpSize[] =
2918 { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };
2919
2920 static const uint32_t s_auIoAddrSize[] =
2921 { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };
2922
2923 pIoExitInfo->u = s_auIoOpSize[cbReg & 7];
2924 pIoExitInfo->u |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
2925 pIoExitInfo->n.u1Str = fStrIo;
2926 pIoExitInfo->n.u1Rep = fRep;
2927 pIoExitInfo->n.u3Seg = iEffSeg & 7;
2928 pIoExitInfo->n.u1Type = enmIoType;
2929 pIoExitInfo->n.u16Port = u16Port;
2930 }
2931 return true;
2932 }
2933
2934 /** @todo remove later (for debugging as VirtualBox always traps all IO
2935 * intercepts). */
2936 AssertMsgFailed(("CPUMSvmIsIOInterceptActive: We expect an IO intercept here!\n"));
2937 return false;
2938}
2939
2940
2941/**
2942 * Gets the MSR permission bitmap byte and bit offset for the specified MSR.
2943 *
2944 * @returns VBox status code.
2945 * @param idMsr The MSR being requested.
2946 * @param pbOffMsrpm Where to store the byte offset in the MSR permission
2947 * bitmap for @a idMsr.
2948 * @param puMsrpmBit Where to store the bit offset starting at the byte
2949 * returned in @a pbOffMsrpm.
2950 */
2951VMM_INT_DECL(int) CPUMGetSvmMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit)
2952{
2953 Assert(pbOffMsrpm);
2954 Assert(puMsrpmBit);
2955
2956 /*
2957 * MSRPM Layout:
2958 * Byte offset MSR range
2959 * 0x000 - 0x7ff 0x00000000 - 0x00001fff
2960 * 0x800 - 0xfff 0xc0000000 - 0xc0001fff
2961 * 0x1000 - 0x17ff 0xc0010000 - 0xc0011fff
2962 * 0x1800 - 0x1fff Reserved
2963 *
2964 * Each MSR is represented by 2 permission bits (read and write).
2965 */
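 /* For example: idMsr 0xc0000080 falls into the second range, giving
    bitoffMsr = 0x80 << 1 = 0x100, i.e. byte offset 0x800 + 0x20 = 0x820,
    with the read bit at bit 0 and the write bit at bit 1 of that byte. */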
2966 if (idMsr <= 0x00001fff)
2967 {
2968 /* Pentium-compatible MSRs. */
2969 uint32_t const bitoffMsr = idMsr << 1;
2970 *pbOffMsrpm = bitoffMsr >> 3;
2971 *puMsrpmBit = bitoffMsr & 7;
2972 return VINF_SUCCESS;
2973 }
2974
2975 if ( idMsr >= 0xc0000000
2976 && idMsr <= 0xc0001fff)
2977 {
2978 /* AMD Sixth Generation x86 Processor MSRs. */
2979 uint32_t const bitoffMsr = (idMsr - 0xc0000000) << 1;
2980 *pbOffMsrpm = 0x800 + (bitoffMsr >> 3);
2981 *puMsrpmBit = bitoffMsr & 7;
2982 return VINF_SUCCESS;
2983 }
2984
2985 if ( idMsr >= 0xc0010000
2986 && idMsr <= 0xc0011fff)
2987 {
2988 /* AMD Seventh and Eighth Generation Processor MSRs. */
2989 uint32_t const bitoffMsr = (idMsr - 0xc0010000) << 1;
2990 *pbOffMsrpm = 0x1000 + (bitoffMsr >> 3);
2991 *puMsrpmBit = bitoffMsr & 7;
2992 return VINF_SUCCESS;
2993 }
2994
2995 *pbOffMsrpm = 0;
2996 *puMsrpmBit = 0;
2997 return VERR_OUT_OF_RANGE;
2998}
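/*
 * Minimal caller sketch (illustrative; pbMsrpm is assumed to point at the
 * nested-guest's 8 KB MSR permission map):
 *
 *     uint16_t offMsrpm; uint8_t uBit;
 *     if (RT_SUCCESS(CPUMGetSvmMsrpmOffsetAndBit(idMsr, &offMsrpm, &uBit)))
 *     {
 *         bool const fInterceptRd = RT_BOOL(pbMsrpm[offMsrpm] & RT_BIT(uBit));
 *         bool const fInterceptWr = RT_BOOL(pbMsrpm[offMsrpm] & RT_BIT(uBit + 1));
 *     }
 */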
2999
3000
3001/**
3002 * Checks whether the guest is in VMX non-root mode and using EPT paging.
3003 *
3004 * @returns @c true if in VMX non-root operation with EPT, @c false otherwise.
3005 * @param pVCpu The cross context virtual CPU structure.
3006 */
3007VMM_INT_DECL(bool) CPUMIsGuestVmxEptPagingEnabled(PCVMCPUCC pVCpu)
3008{
3009 return CPUMIsGuestVmxEptPagingEnabledEx(&pVCpu->cpum.s.Guest);
3010}
3011
3012
3013/**
3014 * Checks whether the guest is in VMX non-root mode and using EPT paging and the
3015 * nested-guest is in PAE mode.
3016 *
3017 * @returns @c true if in VMX non-root operation with EPT, @c false otherwise.
3018 * @param pVCpu The cross context virtual CPU structure.
3019 */
3020VMM_INT_DECL(bool) CPUMIsGuestVmxEptPaePagingEnabled(PCVMCPUCC pVCpu)
3021{
3022 return CPUMIsGuestVmxEptPagingEnabledEx(&pVCpu->cpum.s.Guest)
3023 && CPUMIsGuestInPAEModeEx(&pVCpu->cpum.s.Guest);
3024}
3025
3026
3027/**
3028 * Returns the guest-physical address of the APIC-access page when executing a
3029 * nested-guest.
3030 *
3031 * @returns The APIC-access page guest-physical address.
3032 * @param pVCpu The cross context virtual CPU structure.
3033 */
3034VMM_INT_DECL(uint64_t) CPUMGetGuestVmxApicAccessPageAddr(PCVMCPUCC pVCpu)
3035{
3036 return CPUMGetGuestVmxApicAccessPageAddrEx(&pVCpu->cpum.s.Guest);
3037}
3038