VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@ 80015

Last change on this file since 80015 was 80007, checked in by vboxsync, 6 years ago

VMM: Kicking out raw-mode (work in progress). bugref:9517

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 119.5 KB
 
1/* $Id: CPUMAllRegs.cpp 80007 2019-07-26 13:57:38Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/dbgf.h>
25#include <VBox/vmm/apic.h>
26#include <VBox/vmm/pgm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#ifndef IN_RC
30# include <VBox/vmm/nem.h>
31# include <VBox/vmm/hm.h>
32#endif
33#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
34# include <VBox/vmm/selm.h>
35#endif
36#include "CPUMInternal.h"
37#include <VBox/vmm/vm.h>
38#include <VBox/err.h>
39#include <VBox/dis.h>
40#include <VBox/log.h>
41#include <VBox/vmm/hm.h>
42#include <VBox/vmm/tm.h>
43#include <iprt/assert.h>
44#include <iprt/asm.h>
45#include <iprt/asm-amd64-x86.h>
46#ifdef IN_RING3
47# include <iprt/thread.h>
48#endif
49
50/** Disable stack frame pointer generation here. */
51#if defined(_MSC_VER) && !defined(DEBUG) && defined(RT_ARCH_X86)
52# pragma optimize("y", off)
53#endif
54
55AssertCompile2MemberOffsets(VM, cpum.s.HostFeatures, cpum.ro.HostFeatures);
56AssertCompile2MemberOffsets(VM, cpum.s.GuestFeatures, cpum.ro.GuestFeatures);
57
58
59/*********************************************************************************************************************************
60* Defined Constants And Macros *
61*********************************************************************************************************************************/
62/**
63 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
64 *
65 * @returns Pointer to the Virtual CPU.
66 * @param a_pGuestCtx Pointer to the guest context.
67 */
68#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
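
/*
 * Illustrative usage sketch (not part of the original file): recovering the
 * VMCPU from a guest context pointer handed out by CPUMQueryGuestCtxPtr().
 * Assumes pVCpu is a valid cross context virtual CPU structure pointer.
 *
 *      PCPUMCTX pCtx   = CPUMQueryGuestCtxPtr(pVCpu);
 *      PVMCPU   pVCpu2 = CPUM_GUEST_CTX_TO_VMCPU(pCtx);   // recovers the owning VMCPU
 *      Assert(pVCpu2 == pVCpu);
 */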
69
70/**
71 * Lazily loads the hidden parts of a selector register when using raw-mode.
72 */
73#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
74# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
75 do \
76 { \
77 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
78 cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
79 } while (0)
80#else
81# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
82 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
83#endif
84
85/** @def CPUM_INT_ASSERT_NOT_EXTRN
86 * Macro for asserting that @a a_fNotExtrn are present.
87 *
88 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
89 * @param a_fNotExtrn Mask of CPUMCTX_EXTRN_XXX bits to check.
90 */
91#define CPUM_INT_ASSERT_NOT_EXTRN(a_pVCpu, a_fNotExtrn) \
92 AssertMsg(!((a_pVCpu)->cpum.s.Guest.fExtrn & (a_fNotExtrn)), \
93 ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.s.Guest.fExtrn, (a_fNotExtrn)))
94
95
96
97
98#ifdef VBOX_WITH_RAW_MODE_NOT_R0
99
100/**
101 * Does the lazy hidden selector register loading.
102 *
103 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
104 * @param pSReg The selector register to lazily load hidden parts of.
105 */
106static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
107{
108 Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
109 Assert(VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)));
110 Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);
111
112 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
113 {
114 /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
115 pSReg->Attr.u = 0;
116 pSReg->Attr.n.u4Type = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
117 pSReg->Attr.n.u1DescType = 1; /* code/data segment */
118 pSReg->Attr.n.u2Dpl = 3;
119 pSReg->Attr.n.u1Present = 1;
120 pSReg->u32Limit = 0x0000ffff;
121 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
122 pSReg->ValidSel = pSReg->Sel;
123 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
124 /** @todo Check what the accessed bit should be (VT-x and AMD-V). */
125 }
126 else if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
127 {
128 /* Real mode - leave the limit and flags alone here, at least for now. */
129 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
130 pSReg->ValidSel = pSReg->Sel;
131 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
132 }
133 else
134 {
135 /* Protected mode - get it from the selector descriptor tables. */
136 if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
137 {
138 Assert(!CPUMIsGuestInLongMode(pVCpu));
139 pSReg->Sel = 0;
140 pSReg->u64Base = 0;
141 pSReg->u32Limit = 0;
142 pSReg->Attr.u = 0;
143 pSReg->ValidSel = 0;
144 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
145 /** @todo see todo in iemHlpLoadNullDataSelectorProt. */
146 }
147 else
148 SELMLoadHiddenSelectorReg(pVCpu, &pVCpu->cpum.s.Guest, pSReg);
149 }
150}
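
/*
 * Worked example (illustrative, not part of the original file): in the real
 * mode and V8086 paths above the hidden base is simply the selector shifted
 * left by four bits.
 *
 *      uint16_t const uSel  = 0x1234;                /* hypothetical selector value */
 *      uint64_t const uBase = (uint32_t)uSel << 4;   /* 0x12340, same as pSReg->u64Base above */
 */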
151
152
153/**
154 * Makes sure the hidden CS and SS selector registers are valid, loading them if
155 * necessary.
156 *
157 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
158 */
159VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
160{
161 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
162 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
163}
164
165
166/**
167 * Loads the hidden parts of a selector register.
168 *
169 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
170 * @param pSReg The selector register to lazily load hidden parts of.
171 */
172VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
173{
174 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
175}
176
177#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
178
179
180/**
181 * Obsolete.
182 *
183 * We don't support nested hypervisor context interrupts or traps. Life is much
184 * simpler when we don't. It's also slightly faster at times.
185 *
186 * @param pVCpu The cross context virtual CPU structure.
187 */
188VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
189{
190 return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
191}
192
193
194/**
195 * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
196 *
197 * @param pVCpu The cross context virtual CPU structure.
198 */
199VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
200{
201 return &pVCpu->cpum.s.Hyper;
202}
203
204
205VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
206{
207 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
208 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
209}
210
211
212VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
213{
214 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
215 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
216}
217
218
219VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
220{
221 pVCpu->cpum.s.Hyper.cr3 = cr3;
222
223#ifdef IN_RC
224 /* Update the current CR3. */
225 ASMSetCR3(cr3);
226#endif
227}
228
229VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
230{
231 return pVCpu->cpum.s.Hyper.cr3;
232}
233
234
235VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
236{
237 pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
238}
239
240
241VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
242{
243 pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
244}
245
246
247VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
248{
249 pVCpu->cpum.s.Hyper.es.Sel = SelES;
250}
251
252
253VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
254{
255 pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
256}
257
258
259VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
260{
261 pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
262}
263
264
265VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
266{
267 pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
268}
269
270
271VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
272{
273 pVCpu->cpum.s.Hyper.esp = u32ESP;
274}
275
276
277VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32EDX)
278{
279 pVCpu->cpum.s.Hyper.edx = u32EDX;
280}
281
282
283VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
284{
285 pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
286 return VINF_SUCCESS;
287}
288
289
290VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
291{
292 pVCpu->cpum.s.Hyper.eip = u32EIP;
293}
294
295
296/**
297 * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
298 * EFLAGS and EIP prior to resuming guest execution.
299 *
300 * All general registers not given as a parameter will be set to 0. The EFLAGS
301 * register will be set to sane values for C/C++ code execution with interrupts
302 * disabled and IOPL 0.
303 *
304 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
305 * @param u32EIP The EIP value.
306 * @param u32ESP The ESP value.
307 * @param u32EAX The EAX value.
308 * @param u32EDX The EDX value.
309 */
310VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
311{
312 pVCpu->cpum.s.Hyper.eip = u32EIP;
313 pVCpu->cpum.s.Hyper.esp = u32ESP;
314 pVCpu->cpum.s.Hyper.eax = u32EAX;
315 pVCpu->cpum.s.Hyper.edx = u32EDX;
316 pVCpu->cpum.s.Hyper.ecx = 0;
317 pVCpu->cpum.s.Hyper.ebx = 0;
318 pVCpu->cpum.s.Hyper.ebp = 0;
319 pVCpu->cpum.s.Hyper.esi = 0;
320 pVCpu->cpum.s.Hyper.edi = 0;
321 pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
322}
323
324
325VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
326{
327 pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
328}
329
330
331VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
332{
333 pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
334}
335
336
337/** @def MAYBE_LOAD_DRx
338 * Macro for updating DRx values in raw-mode and ring-0 contexts.
339 */
340#ifdef IN_RING0
341# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
342# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
343 do { \
344 if (!CPUMIsGuestInLongModeEx(&(a_pVCpu)->cpum.s.Guest)) \
345 a_fnLoad(a_uValue); \
346 else \
347 (a_pVCpu)->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER; \
348 } while (0)
349# else
350# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
351 do { \
352 a_fnLoad(a_uValue); \
353 } while (0)
354# endif
355
356#elif defined(IN_RC)
357# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
358 do { \
359 if ((a_pVCpu)->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER) \
360 { a_fnLoad(a_uValue); } \
361 } while (0)
362
363#else
364# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
365#endif
366
367VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
368{
369 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
370 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
371}
372
373
374VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
375{
376 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
377 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
378}
379
380
381VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
382{
383 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
384 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
385}
386
387
388VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
389{
390 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
391 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
392}
393
394
395VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
396{
397 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
398}
399
400
401VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
402{
403 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
404#ifdef IN_RC
405 MAYBE_LOAD_DRx(pVCpu, ASMSetDR7, uDr7);
406#endif
407}
408
409
410VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
411{
412 return pVCpu->cpum.s.Hyper.cs.Sel;
413}
414
415
416VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
417{
418 return pVCpu->cpum.s.Hyper.ds.Sel;
419}
420
421
422VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
423{
424 return pVCpu->cpum.s.Hyper.es.Sel;
425}
426
427
428VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
429{
430 return pVCpu->cpum.s.Hyper.fs.Sel;
431}
432
433
434VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
435{
436 return pVCpu->cpum.s.Hyper.gs.Sel;
437}
438
439
440VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
441{
442 return pVCpu->cpum.s.Hyper.ss.Sel;
443}
444
445
446VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
447{
448 return pVCpu->cpum.s.Hyper.eax;
449}
450
451
452VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
453{
454 return pVCpu->cpum.s.Hyper.ebx;
455}
456
457
458VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
459{
460 return pVCpu->cpum.s.Hyper.ecx;
461}
462
463
464VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
465{
466 return pVCpu->cpum.s.Hyper.edx;
467}
468
469
470VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
471{
472 return pVCpu->cpum.s.Hyper.esi;
473}
474
475
476VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
477{
478 return pVCpu->cpum.s.Hyper.edi;
479}
480
481
482VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
483{
484 return pVCpu->cpum.s.Hyper.ebp;
485}
486
487
488VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
489{
490 return pVCpu->cpum.s.Hyper.esp;
491}
492
493
494VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
495{
496 return pVCpu->cpum.s.Hyper.eflags.u32;
497}
498
499
500VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
501{
502 return pVCpu->cpum.s.Hyper.eip;
503}
504
505
506VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
507{
508 return pVCpu->cpum.s.Hyper.rip;
509}
510
511
512VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
513{
514 if (pcbLimit)
515 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
516 return pVCpu->cpum.s.Hyper.idtr.pIdt;
517}
518
519
520VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
521{
522 if (pcbLimit)
523 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
524 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
525}
526
527
528VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
529{
530 return pVCpu->cpum.s.Hyper.ldtr.Sel;
531}
532
533
534VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
535{
536 return pVCpu->cpum.s.Hyper.dr[0];
537}
538
539
540VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
541{
542 return pVCpu->cpum.s.Hyper.dr[1];
543}
544
545
546VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
547{
548 return pVCpu->cpum.s.Hyper.dr[2];
549}
550
551
552VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
553{
554 return pVCpu->cpum.s.Hyper.dr[3];
555}
556
557
558VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
559{
560 return pVCpu->cpum.s.Hyper.dr[6];
561}
562
563
564VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
565{
566 return pVCpu->cpum.s.Hyper.dr[7];
567}
568
569
570/**
571 * Gets the pointer to the internal CPUMCTXCORE structure.
572 * This is only for reading in order to save a few calls.
573 *
574 * @param pVCpu The cross context virtual CPU structure.
575 */
576VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
577{
578 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
579}
580
581
582/**
583 * Queries the pointer to the internal CPUMCTX structure.
584 *
585 * @returns The CPUMCTX pointer.
586 * @param pVCpu The cross context virtual CPU structure.
587 */
588VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
589{
590 return &pVCpu->cpum.s.Guest;
591}
592
593
594/**
595 * Queries the pointer to the internal CPUMCTXMSRS structure.
596 *
597 * This is for NEM only.
598 *
599 * @returns The CPUMCTX pointer.
600 * @param pVCpu The cross context virtual CPU structure.
601 */
602VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu)
603{
604 return &pVCpu->cpum.s.GuestMsrs;
605}
606
607
608VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
609{
610#ifdef VBOX_WITH_RAW_MODE_NOT_R0
611 if (VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
612 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
613#endif
614 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
615 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
616 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_GDTR;
617 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
618 return VINF_SUCCESS; /* formality, consider it void. */
619}
620
621
622VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
623{
624#ifdef VBOX_WITH_RAW_MODE_NOT_R0
625 if (VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
626 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
627#endif
628 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
629 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
630 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_IDTR;
631 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
632 return VINF_SUCCESS; /* formality, consider it void. */
633}
634
635
636VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
637{
638#ifdef VBOX_WITH_RAW_MODE_NOT_R0
639 if (VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
640 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
641#endif
642 pVCpu->cpum.s.Guest.tr.Sel = tr;
643 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
644 return VINF_SUCCESS; /* formality, consider it void. */
645}
646
647
648VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
649{
650#ifdef VBOX_WITH_RAW_MODE_NOT_R0
651 if ( ( ldtr != 0
652 || pVCpu->cpum.s.Guest.ldtr.Sel != 0)
653 && VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
654 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
655#endif
656 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
657 /* The caller will set more hidden bits if it has them. */
658 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
659 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
660 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
661 return VINF_SUCCESS; /* formality, consider it void. */
662}
663
664
665/**
666 * Set the guest CR0.
667 *
668 * When called in GC, the hyper CR0 may be updated if that is
669 * required. The caller only has to take special action if AM,
670 * WP, PG or PE changes.
671 *
672 * @returns VINF_SUCCESS (consider it void).
673 * @param pVCpu The cross context virtual CPU structure.
674 * @param cr0 The new CR0 value.
675 */
676VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
677{
678#ifdef IN_RC
679 /*
680 * Check if we need to change hypervisor CR0 because
681 * of math stuff.
682 */
683 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
684 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
685 {
686 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST))
687 {
688 /*
689 * We haven't loaded the guest FPU state yet, so TS and MP are both set
690 * and EM should be reflecting the guest EM (it always does this).
691 */
692 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
693 {
694 uint32_t HyperCR0 = ASMGetCR0();
695 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
696 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
697 HyperCR0 &= ~X86_CR0_EM;
698 HyperCR0 |= cr0 & X86_CR0_EM;
699 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
700 ASMSetCR0(HyperCR0);
701 }
702# ifdef VBOX_STRICT
703 else
704 {
705 uint32_t HyperCR0 = ASMGetCR0();
706 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
707 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
708 }
709# endif
710 }
711 else
712 {
713 /*
714 * Already loaded the guest FPU state, so we're just mirroring
715 * the guest flags.
716 */
717 uint32_t HyperCR0 = ASMGetCR0();
718 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
719 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
720 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
721 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
722 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
723 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
724 ASMSetCR0(HyperCR0);
725 }
726 }
727#endif /* IN_RC */
728
729 /*
730 * Check for changes causing TLB flushes (for REM).
731 * The caller is responsible for calling PGM when appropriate.
732 */
733 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
734 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
735 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
736 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
737
738 /*
739 * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
740 */
741 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
742 PGMCr0WpEnabled(pVCpu);
743
744 /* The ET flag is settable on a 386 and hardwired on 486+. */
745 if ( !(cr0 & X86_CR0_ET)
746 && pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386)
747 cr0 |= X86_CR0_ET;
748
749 pVCpu->cpum.s.Guest.cr0 = cr0;
750 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR0;
751 return VINF_SUCCESS;
752}
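
/*
 * Usage sketch (illustrative only, not part of the original file; assumes a
 * valid pVCpu for the calling EMT): a CR0 write that flips PG, WP or PE marks
 * a global TLB flush for REM, a WP 0->1 transition additionally notifies PGM,
 * and ET reads back as set on anything newer than an 80386.
 *
 *      uint64_t const uCr0 = CPUMGetGuestCR0(pVCpu);
 *      CPUMSetGuestCR0(pVCpu, uCr0 | X86_CR0_WP);        // WP 0->1 calls PGMCr0WpEnabled()
 *      Assert(CPUMGetGuestCR0(pVCpu) & X86_CR0_ET);      // hardwired unless the guest is an 80386
 */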
753
754
755VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
756{
757 pVCpu->cpum.s.Guest.cr2 = cr2;
758 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR2;
759 return VINF_SUCCESS;
760}
761
762
763VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
764{
765 pVCpu->cpum.s.Guest.cr3 = cr3;
766 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
767 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR3;
768 return VINF_SUCCESS;
769}
770
771
772VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
773{
774 /* Note! We don't bother with OSXSAVE and legacy CPUID patches. */
775
776 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
777 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
778 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
779
780 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
781 pVCpu->cpum.s.Guest.cr4 = cr4;
782 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR4;
783 return VINF_SUCCESS;
784}
785
786
787VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
788{
789 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
790 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
791 return VINF_SUCCESS;
792}
793
794
795VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
796{
797 pVCpu->cpum.s.Guest.eip = eip;
798 return VINF_SUCCESS;
799}
800
801
802VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
803{
804 pVCpu->cpum.s.Guest.eax = eax;
805 return VINF_SUCCESS;
806}
807
808
809VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
810{
811 pVCpu->cpum.s.Guest.ebx = ebx;
812 return VINF_SUCCESS;
813}
814
815
816VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
817{
818 pVCpu->cpum.s.Guest.ecx = ecx;
819 return VINF_SUCCESS;
820}
821
822
823VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
824{
825 pVCpu->cpum.s.Guest.edx = edx;
826 return VINF_SUCCESS;
827}
828
829
830VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
831{
832 pVCpu->cpum.s.Guest.esp = esp;
833 return VINF_SUCCESS;
834}
835
836
837VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
838{
839 pVCpu->cpum.s.Guest.ebp = ebp;
840 return VINF_SUCCESS;
841}
842
843
844VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
845{
846 pVCpu->cpum.s.Guest.esi = esi;
847 return VINF_SUCCESS;
848}
849
850
851VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
852{
853 pVCpu->cpum.s.Guest.edi = edi;
854 return VINF_SUCCESS;
855}
856
857
858VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
859{
860 pVCpu->cpum.s.Guest.ss.Sel = ss;
861 return VINF_SUCCESS;
862}
863
864
865VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
866{
867 pVCpu->cpum.s.Guest.cs.Sel = cs;
868 return VINF_SUCCESS;
869}
870
871
872VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
873{
874 pVCpu->cpum.s.Guest.ds.Sel = ds;
875 return VINF_SUCCESS;
876}
877
878
879VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
880{
881 pVCpu->cpum.s.Guest.es.Sel = es;
882 return VINF_SUCCESS;
883}
884
885
886VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
887{
888 pVCpu->cpum.s.Guest.fs.Sel = fs;
889 return VINF_SUCCESS;
890}
891
892
893VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
894{
895 pVCpu->cpum.s.Guest.gs.Sel = gs;
896 return VINF_SUCCESS;
897}
898
899
900VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
901{
902 pVCpu->cpum.s.Guest.msrEFER = val;
903 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_EFER;
904}
905
906
907VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PCVMCPU pVCpu, uint16_t *pcbLimit)
908{
909 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_IDTR);
910 if (pcbLimit)
911 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
912 return pVCpu->cpum.s.Guest.idtr.pIdt;
913}
914
915
916VMMDECL(RTSEL) CPUMGetGuestTR(PCVMCPU pVCpu, PCPUMSELREGHID pHidden)
917{
918 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_TR);
919 if (pHidden)
920 *pHidden = pVCpu->cpum.s.Guest.tr;
921 return pVCpu->cpum.s.Guest.tr.Sel;
922}
923
924
925VMMDECL(RTSEL) CPUMGetGuestCS(PCVMCPU pVCpu)
926{
927 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CS);
928 return pVCpu->cpum.s.Guest.cs.Sel;
929}
930
931
932VMMDECL(RTSEL) CPUMGetGuestDS(PCVMCPU pVCpu)
933{
934 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DS);
935 return pVCpu->cpum.s.Guest.ds.Sel;
936}
937
938
939VMMDECL(RTSEL) CPUMGetGuestES(PCVMCPU pVCpu)
940{
941 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_ES);
942 return pVCpu->cpum.s.Guest.es.Sel;
943}
944
945
946VMMDECL(RTSEL) CPUMGetGuestFS(PCVMCPU pVCpu)
947{
948 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_FS);
949 return pVCpu->cpum.s.Guest.fs.Sel;
950}
951
952
953VMMDECL(RTSEL) CPUMGetGuestGS(PCVMCPU pVCpu)
954{
955 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_GS);
956 return pVCpu->cpum.s.Guest.gs.Sel;
957}
958
959
960VMMDECL(RTSEL) CPUMGetGuestSS(PCVMCPU pVCpu)
961{
962 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SS);
963 return pVCpu->cpum.s.Guest.ss.Sel;
964}
965
966
967VMMDECL(uint64_t) CPUMGetGuestFlatPC(PVMCPU pVCpu)
968{
969 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
970 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
971 if ( !CPUMIsGuestInLongMode(pVCpu)
972 || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
973 return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.cs.u64Base;
974 return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.cs.u64Base;
975}
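
/*
 * Worked example (illustrative, not part of the original file): outside long
 * mode the flat PC is EIP plus the hidden CS base, so with cs.u64Base =
 * 0x00010000 and eip = 0x00001234 the function returns 0x00011234.
 */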
976
977
978VMMDECL(uint64_t) CPUMGetGuestFlatSP(PVMCPU pVCpu)
979{
980 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
981 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
982 if ( !CPUMIsGuestInLongMode(pVCpu)
983 || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
984 return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.ss.u64Base;
985 return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.ss.u64Base;
986}
987
988
989VMMDECL(RTSEL) CPUMGetGuestLDTR(PCVMCPU pVCpu)
990{
991 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_LDTR);
992 return pVCpu->cpum.s.Guest.ldtr.Sel;
993}
994
995
996VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PCVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
997{
998 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_LDTR);
999 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
1000 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
1001 return pVCpu->cpum.s.Guest.ldtr.Sel;
1002}
1003
1004
1005VMMDECL(uint64_t) CPUMGetGuestCR0(PCVMCPU pVCpu)
1006{
1007 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1008 return pVCpu->cpum.s.Guest.cr0;
1009}
1010
1011
1012VMMDECL(uint64_t) CPUMGetGuestCR2(PCVMCPU pVCpu)
1013{
1014 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR2);
1015 return pVCpu->cpum.s.Guest.cr2;
1016}
1017
1018
1019VMMDECL(uint64_t) CPUMGetGuestCR3(PCVMCPU pVCpu)
1020{
1021 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
1022 return pVCpu->cpum.s.Guest.cr3;
1023}
1024
1025
1026VMMDECL(uint64_t) CPUMGetGuestCR4(PCVMCPU pVCpu)
1027{
1028 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
1029 return pVCpu->cpum.s.Guest.cr4;
1030}
1031
1032
1033VMMDECL(uint64_t) CPUMGetGuestCR8(PCVMCPU pVCpu)
1034{
1035 uint64_t u64;
1036 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
1037 if (RT_FAILURE(rc))
1038 u64 = 0;
1039 return u64;
1040}
1041
1042
1043VMMDECL(void) CPUMGetGuestGDTR(PCVMCPU pVCpu, PVBOXGDTR pGDTR)
1044{
1045 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_GDTR);
1046 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
1047}
1048
1049
1050VMMDECL(uint32_t) CPUMGetGuestEIP(PCVMCPU pVCpu)
1051{
1052 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
1053 return pVCpu->cpum.s.Guest.eip;
1054}
1055
1056
1057VMMDECL(uint64_t) CPUMGetGuestRIP(PCVMCPU pVCpu)
1058{
1059 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
1060 return pVCpu->cpum.s.Guest.rip;
1061}
1062
1063
1064VMMDECL(uint32_t) CPUMGetGuestEAX(PCVMCPU pVCpu)
1065{
1066 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RAX);
1067 return pVCpu->cpum.s.Guest.eax;
1068}
1069
1070
1071VMMDECL(uint32_t) CPUMGetGuestEBX(PCVMCPU pVCpu)
1072{
1073 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RBX);
1074 return pVCpu->cpum.s.Guest.ebx;
1075}
1076
1077
1078VMMDECL(uint32_t) CPUMGetGuestECX(PCVMCPU pVCpu)
1079{
1080 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RCX);
1081 return pVCpu->cpum.s.Guest.ecx;
1082}
1083
1084
1085VMMDECL(uint32_t) CPUMGetGuestEDX(PCVMCPU pVCpu)
1086{
1087 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RDX);
1088 return pVCpu->cpum.s.Guest.edx;
1089}
1090
1091
1092VMMDECL(uint32_t) CPUMGetGuestESI(PCVMCPU pVCpu)
1093{
1094 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSI);
1095 return pVCpu->cpum.s.Guest.esi;
1096}
1097
1098
1099VMMDECL(uint32_t) CPUMGetGuestEDI(PCVMCPU pVCpu)
1100{
1101 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RDI);
1102 return pVCpu->cpum.s.Guest.edi;
1103}
1104
1105
1106VMMDECL(uint32_t) CPUMGetGuestESP(PCVMCPU pVCpu)
1107{
1108 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSP);
1109 return pVCpu->cpum.s.Guest.esp;
1110}
1111
1112
1113VMMDECL(uint32_t) CPUMGetGuestEBP(PCVMCPU pVCpu)
1114{
1115 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RBP);
1116 return pVCpu->cpum.s.Guest.ebp;
1117}
1118
1119
1120VMMDECL(uint32_t) CPUMGetGuestEFlags(PCVMCPU pVCpu)
1121{
1122 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1123 return pVCpu->cpum.s.Guest.eflags.u32;
1124}
1125
1126
1127VMMDECL(int) CPUMGetGuestCRx(PCVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1128{
1129 switch (iReg)
1130 {
1131 case DISCREG_CR0:
1132 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1133 *pValue = pVCpu->cpum.s.Guest.cr0;
1134 break;
1135
1136 case DISCREG_CR2:
1137 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR2);
1138 *pValue = pVCpu->cpum.s.Guest.cr2;
1139 break;
1140
1141 case DISCREG_CR3:
1142 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
1143 *pValue = pVCpu->cpum.s.Guest.cr3;
1144 break;
1145
1146 case DISCREG_CR4:
1147 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
1148 *pValue = pVCpu->cpum.s.Guest.cr4;
1149 break;
1150
1151 case DISCREG_CR8:
1152 {
1153 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1154 uint8_t u8Tpr;
1155 int rc = APICGetTpr(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
1156 if (RT_FAILURE(rc))
1157 {
1158 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
1159 *pValue = 0;
1160 return rc;
1161 }
1162 *pValue = u8Tpr >> 4; /* Bits 7-4 contain the task priority that goes into CR8; bits 3-0 are ignored. */
1163 break;
1164 }
1165
1166 default:
1167 return VERR_INVALID_PARAMETER;
1168 }
1169 return VINF_SUCCESS;
1170}
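
/*
 * Worked example (illustrative, not part of the original file): the APIC TPR
 * keeps the task priority in bits 7-4, so a TPR of 0xB0 yields CR8 = 0xB after
 * the ">> 4" in the DISCREG_CR8 case above.
 */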
1171
1172
1173VMMDECL(uint64_t) CPUMGetGuestDR0(PCVMCPU pVCpu)
1174{
1175 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
1176 return pVCpu->cpum.s.Guest.dr[0];
1177}
1178
1179
1180VMMDECL(uint64_t) CPUMGetGuestDR1(PCVMCPU pVCpu)
1181{
1182 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
1183 return pVCpu->cpum.s.Guest.dr[1];
1184}
1185
1186
1187VMMDECL(uint64_t) CPUMGetGuestDR2(PCVMCPU pVCpu)
1188{
1189 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
1190 return pVCpu->cpum.s.Guest.dr[2];
1191}
1192
1193
1194VMMDECL(uint64_t) CPUMGetGuestDR3(PCVMCPU pVCpu)
1195{
1196 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
1197 return pVCpu->cpum.s.Guest.dr[3];
1198}
1199
1200
1201VMMDECL(uint64_t) CPUMGetGuestDR6(PCVMCPU pVCpu)
1202{
1203 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR6);
1204 return pVCpu->cpum.s.Guest.dr[6];
1205}
1206
1207
1208VMMDECL(uint64_t) CPUMGetGuestDR7(PCVMCPU pVCpu)
1209{
1210 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR7);
1211 return pVCpu->cpum.s.Guest.dr[7];
1212}
1213
1214
1215VMMDECL(int) CPUMGetGuestDRx(PCVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1216{
1217 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR_MASK);
1218 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1219 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1220 if (iReg == 4 || iReg == 5)
1221 iReg += 2;
1222 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1223 return VINF_SUCCESS;
1224}
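
/*
 * Usage sketch (illustrative only, not part of the original file; assumes a
 * valid pVCpu): thanks to the aliasing above, asking for DR5 returns DR7.
 *
 *      uint64_t uValue = 0;
 *      int rc = CPUMGetGuestDRx(pVCpu, 5 /* alias of DR7 */, &uValue);
 *      Assert(RT_SUCCESS(rc) && uValue == CPUMGetGuestDR7(pVCpu));
 */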
1225
1226
1227VMMDECL(uint64_t) CPUMGetGuestEFER(PCVMCPU pVCpu)
1228{
1229 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
1230 return pVCpu->cpum.s.Guest.msrEFER;
1231}
1232
1233
1234/**
1235 * Looks up a CPUID leaf in the CPUID leaf array, no subleaf.
1236 *
1237 * @returns Pointer to the leaf if found, NULL if not.
1238 *
1239 * @param pVM The cross context VM structure.
1240 * @param uLeaf The leaf to get.
1241 */
1242PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf)
1243{
1244 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1245 if (iEnd)
1246 {
1247 unsigned iStart = 0;
1248 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1249 for (;;)
1250 {
1251 unsigned i = iStart + (iEnd - iStart) / 2U;
1252 if (uLeaf < paLeaves[i].uLeaf)
1253 {
1254 if (i <= iStart)
1255 return NULL;
1256 iEnd = i;
1257 }
1258 else if (uLeaf > paLeaves[i].uLeaf)
1259 {
1260 i += 1;
1261 if (i >= iEnd)
1262 return NULL;
1263 iStart = i;
1264 }
1265 else
1266 {
1267 if (RT_LIKELY(paLeaves[i].fSubLeafMask == 0 && paLeaves[i].uSubLeaf == 0))
1268 return &paLeaves[i];
1269
1270 /* This shouldn't normally happen. But in case it does due
1271 to user configuration overrides or something, just return the
1272 first sub-leaf. */
1273 AssertMsgFailed(("uLeaf=%#x fSubLeafMask=%#x uSubLeaf=%#x\n",
1274 uLeaf, paLeaves[i].fSubLeafMask, paLeaves[i].uSubLeaf));
1275 while ( paLeaves[i].uSubLeaf != 0
1276 && i > 0
1277 && uLeaf == paLeaves[i - 1].uLeaf)
1278 i--;
1279 return &paLeaves[i];
1280 }
1281 }
1282 }
1283
1284 return NULL;
1285}
1286
1287
1288/**
1289 * Looks up a CPUID leaf in the CPUID leaf array.
1290 *
1291 * @returns Pointer to the leaf if found, NULL if not.
1292 *
1293 * @param pVM The cross context VM structure.
1294 * @param uLeaf The leaf to get.
1295 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
1296 * isn't.
1297 * @param pfExactSubLeafHit Whether we've got an exact subleaf hit or not.
1298 */
1299PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit)
1300{
1301 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1302 if (iEnd)
1303 {
1304 unsigned iStart = 0;
1305 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1306 for (;;)
1307 {
1308 unsigned i = iStart + (iEnd - iStart) / 2U;
1309 if (uLeaf < paLeaves[i].uLeaf)
1310 {
1311 if (i <= iStart)
1312 return NULL;
1313 iEnd = i;
1314 }
1315 else if (uLeaf > paLeaves[i].uLeaf)
1316 {
1317 i += 1;
1318 if (i >= iEnd)
1319 return NULL;
1320 iStart = i;
1321 }
1322 else
1323 {
1324 uSubLeaf &= paLeaves[i].fSubLeafMask;
1325 if (uSubLeaf == paLeaves[i].uSubLeaf)
1326 *pfExactSubLeafHit = true;
1327 else
1328 {
1329 /* Find the right subleaf. We return the last one before
1330 uSubLeaf if we don't find an exact match. */
1331 if (uSubLeaf < paLeaves[i].uSubLeaf)
1332 while ( i > 0
1333 && uLeaf == paLeaves[i - 1].uLeaf
1334 && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
1335 i--;
1336 else
1337 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
1338 && uLeaf == paLeaves[i + 1].uLeaf
1339 && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
1340 i++;
1341 *pfExactSubLeafHit = uSubLeaf == paLeaves[i].uSubLeaf;
1342 }
1343 return &paLeaves[i];
1344 }
1345 }
1346 }
1347
1348 *pfExactSubLeafHit = false;
1349 return NULL;
1350}
1351
1352
1353/**
1354 * Gets a CPUID leaf.
1355 *
1356 * @param pVCpu The cross context virtual CPU structure.
1357 * @param uLeaf The CPUID leaf to get.
1358 * @param uSubLeaf The CPUID sub-leaf to get, if applicable.
1359 * @param pEax Where to store the EAX value.
1360 * @param pEbx Where to store the EBX value.
1361 * @param pEcx Where to store the ECX value.
1362 * @param pEdx Where to store the EDX value.
1363 */
1364VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t uLeaf, uint32_t uSubLeaf,
1365 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1366{
1367 bool fExactSubLeafHit;
1368 PVM pVM = pVCpu->CTX_SUFF(pVM);
1369 PCCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, uLeaf, uSubLeaf, &fExactSubLeafHit);
1370 if (pLeaf)
1371 {
1372 AssertMsg(pLeaf->uLeaf == uLeaf, ("%#x %#x\n", pLeaf->uLeaf, uLeaf));
1373 if (fExactSubLeafHit)
1374 {
1375 *pEax = pLeaf->uEax;
1376 *pEbx = pLeaf->uEbx;
1377 *pEcx = pLeaf->uEcx;
1378 *pEdx = pLeaf->uEdx;
1379
1380 /*
1381 * Deal with CPU specific information.
1382 */
1383 if (pLeaf->fFlags & ( CPUMCPUIDLEAF_F_CONTAINS_APIC_ID
1384 | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE
1385 | CPUMCPUIDLEAF_F_CONTAINS_APIC ))
1386 {
1387 if (uLeaf == 1)
1388 {
1389 /* EBX: Bits 31-24: Initial APIC ID. */
1390 Assert(pVCpu->idCpu <= 255);
1391 AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
1392 *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);
1393
1394 /* EDX: Bit 9: AND with APICBASE.EN. */
1395 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1396 *pEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
1397
1398 /* ECX: Bit 27: CR4.OSXSAVE mirror. */
1399 *pEcx = (pLeaf->uEcx & ~X86_CPUID_FEATURE_ECX_OSXSAVE)
1400 | (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE ? X86_CPUID_FEATURE_ECX_OSXSAVE : 0);
1401 }
1402 else if (uLeaf == 0xb)
1403 {
1404 /* EDX: Initial extended APIC ID. */
1405 AssertMsg(pLeaf->uEdx == 0, ("%#x\n", pLeaf->uEdx)); /* raw-mode assumption */
1406 *pEdx = pVCpu->idCpu;
1407 Assert(!(pLeaf->fFlags & ~(CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)));
1408 }
1409 else if (uLeaf == UINT32_C(0x8000001e))
1410 {
1411 /* EAX: Initial extended APIC ID. */
1412 AssertMsg(pLeaf->uEax == 0, ("%#x\n", pLeaf->uEax)); /* raw-mode assumption */
1413 *pEax = pVCpu->idCpu;
1414 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC_ID));
1415 }
1416 else if (uLeaf == UINT32_C(0x80000001))
1417 {
1418 /* EDX: Bit 9: AND with APICBASE.EN. */
1419 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible)
1420 *pEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1421 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC));
1422 }
1423 else
1424 AssertMsgFailed(("uLeaf=%#x\n", uLeaf));
1425 }
1426 }
1427 /*
1428 * Out-of-range sub-leaves aren't quite as easy and pretty to emulate,
1429 * but we do the best we can here...
1430 */
1431 else
1432 {
1433 *pEax = *pEbx = *pEcx = *pEdx = 0;
1434 if (pLeaf->fFlags & CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)
1435 {
1436 *pEcx = uSubLeaf & 0xff;
1437 *pEdx = pVCpu->idCpu;
1438 }
1439 }
1440 }
1441 else
1442 {
1443 /*
1444 * Different CPUs have different ways of dealing with unknown CPUID leaves.
1445 */
1446 switch (pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod)
1447 {
1448 default:
1449 AssertFailed();
1450 RT_FALL_THRU();
1451 case CPUMUNKNOWNCPUID_DEFAULTS:
1452 case CPUMUNKNOWNCPUID_LAST_STD_LEAF: /* ASSUME this is executed */
1453 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: /** @todo Implement CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX */
1454 *pEax = pVM->cpum.s.GuestInfo.DefCpuId.uEax;
1455 *pEbx = pVM->cpum.s.GuestInfo.DefCpuId.uEbx;
1456 *pEcx = pVM->cpum.s.GuestInfo.DefCpuId.uEcx;
1457 *pEdx = pVM->cpum.s.GuestInfo.DefCpuId.uEdx;
1458 break;
1459 case CPUMUNKNOWNCPUID_PASSTHRU:
1460 *pEax = uLeaf;
1461 *pEbx = 0;
1462 *pEcx = uSubLeaf;
1463 *pEdx = 0;
1464 break;
1465 }
1466 }
1467 Log2(("CPUMGetGuestCpuId: uLeaf=%#010x/%#010x %RX32 %RX32 %RX32 %RX32\n", uLeaf, uSubLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1468}
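
/*
 * Usage sketch (illustrative only, not part of the original file; assumes a
 * valid pVCpu): querying standard leaf 1 and testing the SSE2 feature bit.
 *
 *      uint32_t uEax, uEbx, uEcx, uEdx;
 *      CPUMGetGuestCpuId(pVCpu, 1, 0, &uEax, &uEbx, &uEcx, &uEdx);
 *      bool const fSse2 = RT_BOOL(uEdx & X86_CPUID_FEATURE_EDX_SSE2);
 */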
1469
1470
1471/**
1472 * Sets the visibility of the X86_CPUID_FEATURE_EDX_APIC and
1473 * X86_CPUID_AMD_FEATURE_EDX_APIC CPUID bits.
1474 *
1475 * @returns Previous value.
1476 * @param pVCpu The cross context virtual CPU structure to make the
1477 * change on. Usually the calling EMT.
1478 * @param fVisible Whether to make it visible (true) or hide it (false).
1479 *
1480 * @remarks This is "VMMDECL" so that it still links with
1481 * the old APIC code which is in VBoxDD2 and not in
1482 * the VMM module.
1483 */
1484VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible)
1485{
1486 bool fOld = pVCpu->cpum.s.fCpuIdApicFeatureVisible;
1487 pVCpu->cpum.s.fCpuIdApicFeatureVisible = fVisible;
1488
1489#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1490 /*
1491 * Patch manager saved state legacy pain.
1492 */
1493 PVM pVM = pVCpu->CTX_SUFF(pVM);
1494 PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1495 if (pLeaf)
1496 {
1497 if (fVisible || (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1498 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx;
1499 else
1500 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx & ~X86_CPUID_FEATURE_EDX_APIC;
1501 }
1502
1503 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1504 if (pLeaf)
1505 {
1506 if (fVisible || (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1507 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx;
1508 else
1509 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx & ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1510 }
1511#endif
1512
1513 return fOld;
1514}
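
/*
 * Usage sketch (illustrative only, not part of the original file; assumes a
 * valid pVCpu): hiding the APIC feature bit while the guest APIC is disabled
 * and restoring the previous visibility afterwards.
 *
 *      bool const fWasVisible = CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, false /*fVisible*/);
 *      // ... CPUID leaves 1 and 0x80000001 now report the APIC bit as clear ...
 *      CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, fWasVisible);
 */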
1515
1516
1517/**
1518 * Gets the host CPU vendor.
1519 *
1520 * @returns CPU vendor.
1521 * @param pVM The cross context VM structure.
1522 */
1523VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1524{
1525 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
1526}
1527
1528
1529/**
1530 * Gets the CPU vendor.
1531 *
1532 * @returns CPU vendor.
1533 * @param pVM The cross context VM structure.
1534 */
1535VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1536{
1537 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
1538}
1539
1540
1541VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1542{
1543 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1544 return CPUMRecalcHyperDRx(pVCpu, 0, false);
1545}
1546
1547
1548VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1549{
1550 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1551 return CPUMRecalcHyperDRx(pVCpu, 1, false);
1552}
1553
1554
1555VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1556{
1557 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1558 return CPUMRecalcHyperDRx(pVCpu, 2, false);
1559}
1560
1561
1562VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1563{
1564 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1565 return CPUMRecalcHyperDRx(pVCpu, 3, false);
1566}
1567
1568
1569VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1570{
1571 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1572 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_DR6;
1573 return VINF_SUCCESS; /* No need to recalc. */
1574}
1575
1576
1577VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1578{
1579 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1580 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_DR7;
1581 return CPUMRecalcHyperDRx(pVCpu, 7, false);
1582}
1583
1584
1585VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1586{
1587 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1588 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1589 if (iReg == 4 || iReg == 5)
1590 iReg += 2;
1591 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1592 return CPUMRecalcHyperDRx(pVCpu, iReg, false);
1593}
1594
1595
1596/**
1597 * Recalculates the hypervisor DRx register values based on current guest
1598 * registers and DBGF breakpoints, updating changed registers depending on the
1599 * context.
1600 *
1601 * This is called whenever a guest DRx register is modified (any context) and
1602 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
1603 *
1604 * In raw-mode context this function will reload any (hyper) DRx registers which
1605 * come out with a different value. It may also have to save the host debug
1606 * registers if that hasn't been done already. In this context though, we'll
1607 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
1608 * are only important when breakpoints are actually enabled.
1609 *
1610 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
1611 * reloaded by the HM code if it changes. Furthermore, we will only use the
1612 * combined register set when the VBox debugger is actually using hardware BPs;
1613 * when it isn't, we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
1614 * concern us here).
1615 *
1616 * In ring-3 we won't be loading anything, so we'll calculate hypervisor values
1617 * all the time.
1618 *
1619 * @returns VINF_SUCCESS.
1620 * @param pVCpu The cross context virtual CPU structure.
1621 * @param iGstReg The guest debug register number that was modified.
1622 * UINT8_MAX if not guest register.
1623 * @param fForceHyper Used in HM to force hyper registers because of single
1624 * stepping.
1625 */
1626VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper)
1627{
1628 PVM pVM = pVCpu->CTX_SUFF(pVM);
1629#ifndef IN_RING0
1630 RT_NOREF_PV(iGstReg);
1631#endif
1632
1633 /*
1634 * Compare the DR7s first.
1635 *
1636 * We only care about the enabled flags. GD is virtualized when we
1637 * dispatch the #DB; we never enable it. The DBGF DR7 value will
1638 * always have the LE and GE bits set, so there is no need to check and disable
1639 * stuff if they're cleared, like we have to for the guest DR7.
1640 */
1641 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1642 /** @todo This isn't correct. BPs work without setting LE and GE under AMD-V. They are also documented as unsupported by P6+. */
1643 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
1644 uGstDr7 = 0;
1645 else if (!(uGstDr7 & X86_DR7_LE))
1646 uGstDr7 &= ~X86_DR7_LE_ALL;
1647 else if (!(uGstDr7 & X86_DR7_GE))
1648 uGstDr7 &= ~X86_DR7_GE_ALL;
1649
1650 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1651
1652#ifdef IN_RING0
1653 if (!fForceHyper && (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER))
1654 fForceHyper = true;
1655#endif
1656 if ( (!VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)) && !fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7))
1657 & X86_DR7_ENABLED_MASK)
1658 {
1659 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1660#ifdef IN_RC
1661 bool const fRawModeEnabled = true;
1662#elif defined(IN_RING3)
1663 bool const fRawModeEnabled = VM_IS_RAW_MODE_ENABLED(pVM);
1664#endif
1665
1666 /*
1667 * Ok, something is enabled. Recalc each of the breakpoints, taking
1668 * the VM debugger ones over the guest ones. In raw-mode context we will
1669 * not allow breakpoints with values inside the hypervisor area.
1670 */
1671 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
1672
1673 /* bp 0 */
1674 RTGCUINTREG uNewDr0;
1675 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1676 {
1677 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1678 uNewDr0 = DBGFBpGetDR0(pVM);
1679 }
1680 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1681 {
1682 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1683#ifndef IN_RING0
1684 if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr0))
1685 uNewDr0 = 0;
1686 else
1687#endif
1688 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1689 }
1690 else
1691 uNewDr0 = 0;
1692
1693 /* bp 1 */
1694 RTGCUINTREG uNewDr1;
1695 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1696 {
1697 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1698 uNewDr1 = DBGFBpGetDR1(pVM);
1699 }
1700 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1701 {
1702 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1703#ifndef IN_RING0
1704 if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr1))
1705 uNewDr1 = 0;
1706 else
1707#endif
1708 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1709 }
1710 else
1711 uNewDr1 = 0;
1712
1713 /* bp 2 */
1714 RTGCUINTREG uNewDr2;
1715 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1716 {
1717 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1718 uNewDr2 = DBGFBpGetDR2(pVM);
1719 }
1720 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1721 {
1722 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1723#ifndef IN_RING0
1724 if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr2))
1725 uNewDr2 = 0;
1726 else
1727#endif
1728 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1729 }
1730 else
1731 uNewDr2 = 0;
1732
1733 /* bp 3 */
1734 RTGCUINTREG uNewDr3;
1735 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1736 {
1737 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1738 uNewDr3 = DBGFBpGetDR3(pVM);
1739 }
1740 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1741 {
1742 uNewDr3 = CPUMGetGuestDR3(pVCpu);
1743#ifndef IN_RING0
1744 if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr3))
1745 uNewDr3 = 0;
1746 else
1747#endif
1748 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1749 }
1750 else
1751 uNewDr3 = 0;
1752
1753 /*
1754 * Apply the updates.
1755 */
1756#ifdef IN_RC
1757 /* Make sure to save host registers first. */
1758 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST))
1759 {
1760 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HOST))
1761 {
1762 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
1763 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
1764 }
1765 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
1766 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
1767 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
1768 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
1769 pVCpu->cpum.s.fUseFlags |= CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HYPER;
1770
1771 /* We haven't loaded any hyper DRxes yet, so we'll have to load them all now. */
1772 pVCpu->cpum.s.Hyper.dr[0] = uNewDr0;
1773 ASMSetDR0(uNewDr0);
1774 pVCpu->cpum.s.Hyper.dr[1] = uNewDr1;
1775 ASMSetDR1(uNewDr1);
1776 pVCpu->cpum.s.Hyper.dr[2] = uNewDr2;
1777 ASMSetDR2(uNewDr2);
1778 pVCpu->cpum.s.Hyper.dr[3] = uNewDr3;
1779 ASMSetDR3(uNewDr3);
1780 ASMSetDR6(X86_DR6_INIT_VAL);
1781 pVCpu->cpum.s.Hyper.dr[7] = uNewDr7;
1782 ASMSetDR7(uNewDr7);
1783 }
1784 else
1785#endif
1786 {
1787 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
1788 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
1789 CPUMSetHyperDR3(pVCpu, uNewDr3);
1790 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
1791 CPUMSetHyperDR2(pVCpu, uNewDr2);
1792 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
1793 CPUMSetHyperDR1(pVCpu, uNewDr1);
1794 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
1795 CPUMSetHyperDR0(pVCpu, uNewDr0);
1796 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
1797 CPUMSetHyperDR7(pVCpu, uNewDr7);
1798 }
1799 }
1800#ifdef IN_RING0
1801 else if (CPUMIsGuestDebugStateActive(pVCpu))
1802 {
1803 /*
1804 * Reload the register that was modified. Normally this won't happen
1805 * as we won't intercept DRx writes when not having the hyper debug
1806 * state loaded, but in case we do for some reason we'll simply deal
1807 * with it.
1808 */
1809 switch (iGstReg)
1810 {
1811 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
1812 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
1813 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
1814 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
1815 default:
1816 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
1817 }
1818 }
1819#endif
1820 else
1821 {
1822 /*
1823 * No active debug state any more. In raw-mode this means we have to
1824 * make sure DR7 has everything disabled now, if we armed it already.
1825 * In ring-0 we might end up here when just single stepping.
1826 */
1827#if defined(IN_RC) || defined(IN_RING0)
1828 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
1829 {
1830# ifdef IN_RC
1831 ASMSetDR7(X86_DR7_INIT_VAL);
1832# endif
1833 if (pVCpu->cpum.s.Hyper.dr[0])
1834 ASMSetDR0(0);
1835 if (pVCpu->cpum.s.Hyper.dr[1])
1836 ASMSetDR1(0);
1837 if (pVCpu->cpum.s.Hyper.dr[2])
1838 ASMSetDR2(0);
1839 if (pVCpu->cpum.s.Hyper.dr[3])
1840 ASMSetDR3(0);
1841 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
1842 }
1843#endif
1844 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
1845
1846 /* Clear all the registers. */
1847 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
1848 pVCpu->cpum.s.Hyper.dr[3] = 0;
1849 pVCpu->cpum.s.Hyper.dr[2] = 0;
1850 pVCpu->cpum.s.Hyper.dr[1] = 0;
1851 pVCpu->cpum.s.Hyper.dr[0] = 0;
1852
1853 }
1854 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1855 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
1856 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
1857 pVCpu->cpum.s.Hyper.dr[7]));
1858
1859 return VINF_SUCCESS;
1860}
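
/*
 * Worked example (hypothetical values, not part of the original file): if DBGF
 * arms breakpoint 0 while the guest only arms breakpoint 1, the merged hyper
 * DR7 takes L0/RW0/LEN0 from the DBGF DR7 and L1/RW1/LEN1 from the guest DR7,
 * and the hyper DR0/DR1 values come from DBGFBpGetDR0() and CPUMGetGuestDR1()
 * respectively.
 */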
1861
1862
1863/**
1864 * Set the guest XCR0 register.
1865 *
1866 * Will load additional state if the FPU state is already loaded (in ring-0 &
1867 * raw-mode context).
1868 *
1869 * @returns VINF_SUCCESS on success, VERR_CPUM_RAISE_GP_0 on invalid input
1870 * value.
1871 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1872 * @param uNewValue The new value.
1873 * @thread EMT(pVCpu)
1874 */
1875VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPU pVCpu, uint64_t uNewValue)
1876{
1877 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_XCRx);
1878 if ( (uNewValue & ~pVCpu->CTX_SUFF(pVM)->cpum.s.fXStateGuestMask) == 0
1879 /* The X87 bit cannot be cleared. */
1880 && (uNewValue & XSAVE_C_X87)
1881 /* AVX requires SSE. */
1882 && (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM
1883 /* AVX-512 requires YMM, SSE and all of its three components to be enabled. */
1884 && ( (uNewValue & (XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI)) == 0
1885 || (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI))
1886 == (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI) )
1887 )
1888 {
1889 pVCpu->cpum.s.Guest.aXcr[0] = uNewValue;
1890
1891 /* If more state components are enabled, we need to take care to load
1892 them if the FPU/SSE state is already loaded; otherwise we may leak
1893 host state to the guest. */
1894 uint64_t fNewComponents = ~pVCpu->cpum.s.Guest.fXStateMask & uNewValue;
1895 if (fNewComponents)
1896 {
1897#if defined(IN_RING0) || defined(IN_RC)
1898 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
1899 {
1900 if (pVCpu->cpum.s.Guest.fXStateMask != 0)
1901 /* Adding more components. */
1902 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), fNewComponents);
1903 else
1904 {
1905 /* We're switching from FXSAVE/FXRSTOR to XSAVE/XRSTOR. */
1906 pVCpu->cpum.s.Guest.fXStateMask |= XSAVE_C_X87 | XSAVE_C_SSE;
1907 if (uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE))
1908 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE));
1909 }
1910 }
1911#endif
1912 pVCpu->cpum.s.Guest.fXStateMask |= uNewValue;
1913 }
1914 return VINF_SUCCESS;
1915 }
1916 return VERR_CPUM_RAISE_GP_0;
1917}
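
/*
 * Worked example (illustrative, bit values per the architectural XCR0 layout;
 * not part of the original file). Assuming the guest XCR0 mask permits them:
 *
 *      0x01 (X87)                  - accepted
 *      0x06 (SSE|YMM)              - rejected, X87 can never be cleared
 *      0x05 (X87|YMM)              - rejected, AVX requires SSE
 *      0x07 (X87|SSE|YMM)          - accepted
 *      0x27 (X87|SSE|YMM|OPMASK)   - rejected, the three AVX-512 components
 *                                    must be enabled together
 *      0xE7 (all of the above)     - accepted
 */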
1918
1919
1920/**
1921 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
1922 *
1923 * @returns true if NXE is enabled, otherwise false.
1924 * @param pVCpu The cross context virtual CPU structure.
1925 */
1926VMMDECL(bool) CPUMIsGuestNXEnabled(PCVMCPU pVCpu)
1927{
1928 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
1929 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
1930}
1931
1932
1933/**
1934 * Tests if the guest has the Page Size Extension enabled (PSE).
1935 *
1936 * @returns true if PSE (or PAE) is enabled, otherwise false.
1937 * @param pVCpu The cross context virtual CPU structure.
1938 */
1939VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PCVMCPU pVCpu)
1940{
1941 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
1942 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
1943 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
1944}
1945
1946
1947/**
1948 * Tests if the guest has paging enabled (PG).
1949 *
1950 * @returns true if paging is enabled, otherwise false.
1951 * @param pVCpu The cross context virtual CPU structure.
1952 */
1953VMMDECL(bool) CPUMIsGuestPagingEnabled(PCVMCPU pVCpu)
1954{
1955 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1956 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
1957}
1958
1959
1960/**
1961 * Tests if the guest has ring-0 write protection enabled (CR0.WP).
1962 *
1963 * @returns true if CR0.WP is set, otherwise false.
1964 * @param pVCpu The cross context virtual CPU structure.
1965 */
1966VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PCVMCPU pVCpu)
1967{
1968 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1969 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
1970}
1971
1972
1973/**
1974 * Tests if the guest is running in real mode or not.
1975 *
1976 * @returns true if in real mode, otherwise false.
1977 * @param pVCpu The cross context virtual CPU structure.
1978 */
1979VMMDECL(bool) CPUMIsGuestInRealMode(PCVMCPU pVCpu)
1980{
1981 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1982 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1983}
1984
1985
1986/**
1987 * Tests if the guest is running in real or virtual 8086 mode.
1988 *
1989 * @returns @c true if it is, @c false if not.
1990 * @param pVCpu The cross context virtual CPU structure.
1991 */
1992VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PCVMCPU pVCpu)
1993{
1994 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS);
1995 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
1996 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
1997}
1998
1999
2000/**
2001 * Tests if the guest is running in protected mode or not.
2002 *
2003 * @returns true if in protected mode, otherwise false.
2004 * @param pVCpu The cross context virtual CPU structure.
2005 */
2006VMMDECL(bool) CPUMIsGuestInProtectedMode(PCVMCPU pVCpu)
2007{
2008 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
2009 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2010}
2011
2012
2013/**
2014 * Tests if the guest is running in paged protected mode or not.
2015 *
2016 * @returns true if in paged protected mode, otherwise false.
2017 * @param pVCpu The cross context virtual CPU structure.
2018 */
2019VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PCVMCPU pVCpu)
2020{
2021 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
2022 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
2023}
2024
2025
2026/**
2027 * Tests if the guest is running in long mode or not.
2028 *
2029 * @returns true if in long mode, otherwise false.
2030 * @param pVCpu The cross context virtual CPU structure.
2031 */
2032VMMDECL(bool) CPUMIsGuestInLongMode(PCVMCPU pVCpu)
2033{
2034 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
2035 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
2036}
2037
2038
2039/**
2040 * Tests if the guest is running in PAE mode or not.
2041 *
2042 * @returns true if in PAE mode, otherwise false.
2043 * @param pVCpu The cross context virtual CPU structure.
2044 */
2045VMMDECL(bool) CPUMIsGuestInPAEMode(PCVMCPU pVCpu)
2046{
2047 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
2048 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
2049 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
2050 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
2051 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
2052 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
2053}
2054
2055
2056/**
2057 * Tests if the guest is running in 64-bit mode or not.
2058 *
2059 * @returns true if in 64-bit mode, otherwise false.
2060 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2061 */
2062VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
2063{
2064 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
2065 if (!CPUMIsGuestInLongMode(pVCpu))
2066 return false;
2067 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2068 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
2069}
2070
2071
2072/**
2073 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
2074 * registers.
2075 *
2076 * @returns true if in 64-bit mode, otherwise false.
2077 * @param pCtx Pointer to the current guest CPU context.
2078 */
2079VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
2080{
2081 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
2082}
2083
2084#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2085
2086/**
2087 * Checks whether we have entered raw-mode.
2088 * @returns @c true if we've entered raw-mode and selectors with RPL=1 are
2089 * really RPL=0, @c false if we've not (RPL=1 really is RPL=1).
2090 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2091 */
2092VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PCVMCPU pVCpu)
2093{
2094 return pVCpu->cpum.s.fRawEntered;
2095}
2096
2097/**
2098 * Transforms the guest CPU state to raw-ring mode.
2099 *
2100 * This function will change any of the cs and ss registers with DPL=0 to DPL=1.
2101 *
2102 * @returns VBox status code. (recompiler failure)
2103 * @param pVCpu The cross context virtual CPU structure.
2104 * @see @ref pg_raw
2105 */
2106VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu)
2107{
2108 PVM pVM = pVCpu->CTX_SUFF(pVM);
2109
2110 Assert(!pVCpu->cpum.s.fRawEntered);
2111 Assert(!pVCpu->cpum.s.fRemEntered);
2112 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2113
2114 /*
2115 * Are we in Ring-0?
2116 */
2117 if ( pCtx->ss.Sel
2118 && (pCtx->ss.Sel & X86_SEL_RPL) == 0
2119 && !pCtx->eflags.Bits.u1VM)
2120 {
2121 /*
2122 * Enter execution mode.
2123 */
2124 PATMRawEnter(pVM, pCtx);
2125
2126 /*
2127 * Set CPL to Ring-1.
2128 */
2129 pCtx->ss.Sel |= 1;
2130 if ( pCtx->cs.Sel
2131 && (pCtx->cs.Sel & X86_SEL_RPL) == 0)
2132 pCtx->cs.Sel |= 1;
2133 }
2134 else
2135 {
2136# ifdef VBOX_WITH_RAW_RING1
2137 if ( EMIsRawRing1Enabled(pVM)
2138 && !pCtx->eflags.Bits.u1VM
2139 && (pCtx->ss.Sel & X86_SEL_RPL) == 1)
2140 {
2141 /* Set CPL to Ring-2. */
2142 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 2;
2143 if (pCtx->cs.Sel && (pCtx->cs.Sel & X86_SEL_RPL) == 1)
2144 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 2;
2145 }
2146# else
2147 AssertMsg((pCtx->ss.Sel & X86_SEL_RPL) >= 2 || pCtx->eflags.Bits.u1VM,
2148 ("ring-1 code not supported\n"));
2149# endif
2150 /*
2151 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
2152 */
2153 PATMRawEnter(pVM, pCtx);
2154 }
2155
2156 /*
2157 * Assert sanity.
2158 */
2159 AssertMsg((pCtx->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
2160 AssertReleaseMsg(pCtx->eflags.Bits.u2IOPL == 0,
2161 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2162 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE));
2163
2164 pCtx->eflags.u32 |= X86_EFL_IF; /* paranoia */
2165
2166 pVCpu->cpum.s.fRawEntered = true;
2167 return VINF_SUCCESS;
2168}
2169
2170
2171/**
2172 * Transforms the guest CPU state from raw-ring mode to correct values.
2173 *
2174 * This function will change any selector registers with DPL=1 to DPL=0.
2175 *
2176 * @returns Adjusted rc.
2177 * @param pVCpu The cross context virtual CPU structure.
2178 * @param rc Raw mode return code
2179 * @see @ref pg_raw
2180 */
2181VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, int rc)
2182{
2183 PVM pVM = pVCpu->CTX_SUFF(pVM);
2184
2185 /*
2186 * Don't leave if we've already left (in RC).
2187 */
2188 Assert(!pVCpu->cpum.s.fRemEntered);
2189 if (!pVCpu->cpum.s.fRawEntered)
2190 return rc;
2191 pVCpu->cpum.s.fRawEntered = false;
2192
2193 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2194 Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL));
2195 AssertMsg(pCtx->eflags.Bits.u1VM || pCtx->eflags.Bits.u2IOPL < (unsigned)(pCtx->ss.Sel & X86_SEL_RPL),
2196 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2197
2198 /*
2199 * Are we executing in raw ring-1?
2200 */
2201 if ( (pCtx->ss.Sel & X86_SEL_RPL) == 1
2202 && !pCtx->eflags.Bits.u1VM)
2203 {
2204 /*
2205 * Leave execution mode.
2206 */
2207 PATMRawLeave(pVM, pCtx, rc);
2208 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2209 /** @todo See what happens if we remove this. */
2210 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2211 pCtx->ds.Sel &= ~X86_SEL_RPL;
2212 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2213 pCtx->es.Sel &= ~X86_SEL_RPL;
2214 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2215 pCtx->fs.Sel &= ~X86_SEL_RPL;
2216 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2217 pCtx->gs.Sel &= ~X86_SEL_RPL;
2218
2219 /*
2220 * Ring-1 selector => Ring-0.
2221 */
2222 pCtx->ss.Sel &= ~X86_SEL_RPL;
2223 if ((pCtx->cs.Sel & X86_SEL_RPL) == 1)
2224 pCtx->cs.Sel &= ~X86_SEL_RPL;
2225 }
2226 else
2227 {
2228 /*
2229 * PATM is taking care of the IOPL and IF flags for us.
2230 */
2231 PATMRawLeave(pVM, pCtx, rc);
2232 if (!pCtx->eflags.Bits.u1VM)
2233 {
2234# ifdef VBOX_WITH_RAW_RING1
2235 if ( EMIsRawRing1Enabled(pVM)
2236 && (pCtx->ss.Sel & X86_SEL_RPL) == 2)
2237 {
2238 /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
2239 /** @todo See what happens if we remove this. */
2240 if ((pCtx->ds.Sel & X86_SEL_RPL) == 2)
2241 pCtx->ds.Sel = (pCtx->ds.Sel & ~X86_SEL_RPL) | 1;
2242 if ((pCtx->es.Sel & X86_SEL_RPL) == 2)
2243 pCtx->es.Sel = (pCtx->es.Sel & ~X86_SEL_RPL) | 1;
2244 if ((pCtx->fs.Sel & X86_SEL_RPL) == 2)
2245 pCtx->fs.Sel = (pCtx->fs.Sel & ~X86_SEL_RPL) | 1;
2246 if ((pCtx->gs.Sel & X86_SEL_RPL) == 2)
2247 pCtx->gs.Sel = (pCtx->gs.Sel & ~X86_SEL_RPL) | 1;
2248
2249 /*
2250 * Ring-2 selector => Ring-1.
2251 */
2252 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 1;
2253 if ((pCtx->cs.Sel & X86_SEL_RPL) == 2)
2254 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 1;
2255 }
2256 else
2257 {
2258# endif
2259 /** @todo See what happens if we remove this. */
2260 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2261 pCtx->ds.Sel &= ~X86_SEL_RPL;
2262 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2263 pCtx->es.Sel &= ~X86_SEL_RPL;
2264 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2265 pCtx->fs.Sel &= ~X86_SEL_RPL;
2266 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2267 pCtx->gs.Sel &= ~X86_SEL_RPL;
2268# ifdef VBOX_WITH_RAW_RING1
2269 }
2270# endif
2271 }
2272 }
2273
2274 return rc;
2275}
2276
2277#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2278
2279/**
2280 * Updates the EFLAGS while we're in raw-mode.
2281 *
2282 * @param pVCpu The cross context virtual CPU structure.
2283 * @param fEfl The new EFLAGS value.
2284 */
2285VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl)
2286{
2287#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2288 if (pVCpu->cpum.s.fRawEntered)
2289 PATMRawSetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest, fEfl);
2290 else
2291#endif
2292 pVCpu->cpum.s.Guest.eflags.u32 = fEfl;
2293}
2294
2295
2296/**
2297 * Gets the EFLAGS while we're in raw-mode.
2298 *
2299 * @returns The eflags.
2300 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2301 */
2302VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu)
2303{
2304#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2305 if (pVCpu->cpum.s.fRawEntered)
2306 return PATMRawGetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest);
2307#endif
2308 return pVCpu->cpum.s.Guest.eflags.u32;
2309}
2310
2311
2312/**
2313 * Sets the specified changed flags (CPUM_CHANGED_*).
2314 *
2315 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2316 * @param fChangedAdd The changed flags to add.
2317 */
2318VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd)
2319{
2320 pVCpu->cpum.s.fChanged |= fChangedAdd;
2321}
2322
2323
2324/**
2325 * Checks if the CPU supports the XSAVE and XRSTOR instructions.
2326 *
2327 * @returns true if supported.
2328 * @returns false if not supported.
2329 * @param pVM The cross context VM structure.
2330 */
2331VMMDECL(bool) CPUMSupportsXSave(PVM pVM)
2332{
2333 return pVM->cpum.s.HostFeatures.fXSaveRstor != 0;
2334}
2335
2336
2337/**
2338 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2339 * @returns true if used.
2340 * @returns false if not used.
2341 * @param pVM The cross context VM structure.
2342 */
2343VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2344{
2345 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
2346}
2347
2348
2349/**
2350 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2351 * @returns true if used.
2352 * @returns false if not used.
2353 * @param pVM The cross context VM structure.
2354 */
2355VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2356{
2357 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
2358}
2359
2360#ifdef IN_RC
2361
2362/**
2363 * Lazily sync in the FPU/XMM state.
2364 *
2365 * @returns VBox status code.
2366 * @param pVCpu The cross context virtual CPU structure.
2367 */
2368VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
2369{
2370 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2371}
2372
2373#endif /* IN_RC */
2374
2375/**
2376 * Checks if we activated the FPU/XMM state of the guest OS.
2377 *
2378 * This differs from CPUMIsGuestFPUStateLoaded() in that it refers to the next
2379 * time we'll be executing guest code, so it may return true for 64-on-32 when
2380 * we still haven't actually loaded the FPU status, just scheduled it to be
2381 * we still haven't actually loaded the FPU state, just scheduled it to be
2382 *
2383 * @returns true / false.
2384 * @param pVCpu The cross context virtual CPU structure.
2385 */
2386VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2387{
2388 return RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU_GUEST | CPUM_SYNC_FPU_STATE));
2389}
2390
2391
2392/**
2393 * Checks if we've really loaded the FPU/XMM state of the guest OS.
2394 *
2395 * @returns true / false.
2396 * @param pVCpu The cross context virtual CPU structure.
2397 */
2398VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu)
2399{
2400 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
2401}
2402
2403
2404/**
2405 * Checks if we saved the FPU/XMM state of the host OS.
2406 *
2407 * @returns true / false.
2408 * @param pVCpu The cross context virtual CPU structure.
2409 */
2410VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu)
2411{
2412 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_HOST);
2413}
2414
2415
2416/**
2417 * Checks if the guest debug state is active.
2418 *
2419 * @returns boolean
2420 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2421 */
2422VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2423{
2424 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
2425}
2426
2427
2428/**
2429 * Checks if the guest debug state is to be made active during the world-switch
2430 * (currently only used for the 32->64 switcher case).
2431 *
2432 * @returns boolean
2433 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2434 */
2435VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu)
2436{
2437 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_GUEST);
2438}
2439
2440
2441/**
2442 * Checks if the hyper debug state is active.
2443 *
2444 * @returns boolean
2445 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2446 */
2447VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2448{
2449 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
2450}
2451
2452
2453/**
2454 * Checks if the hyper debug state is to be made active during the world-switch
2455 * (currently only used for the 32->64 switcher case).
2456 *
2457 * @returns boolean
2458 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2459 */
2460VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu)
2461{
2462 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER);
2463}
2464
2465
2466/**
2467 * Mark the guest's debug state as inactive.
2468 *
2470 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2471 * @todo This API doesn't make sense any more.
2472 */
2473VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2474{
2475 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
2476 NOREF(pVCpu);
2477}
2478
2479
2480/**
2481 * Get the current privilege level of the guest.
2482 *
2483 * @returns CPL
2484 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2485 */
2486VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
2487{
2488 /*
2489 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
2490 *
2491 * Note! We used to check CS.DPL here, assuming it was always equal to
2492 * CPL even if a conforming segment was loaded. But this turned out to
2493 * only apply to older AMD-V. With VT-x we had an ACP2 regression
2494 * during install after a far call to ring 2 with VT-x. Then on newer
2495 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
2496 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
2497 *
2498 * So, forget CS.DPL, always use SS.DPL.
2499 *
2500 * Note! The SS RPL is always equal to the CPL, while the CS RPL
2501 * isn't necessarily equal if the segment is conforming.
2502 * See section 4.11.1 in the AMD manual.
2503 *
2504 * Update: Where the heck does it say CS.RPL can differ from CPL other than
2505 * right after real->prot mode switch and when in V8086 mode? That
2506 * section says the RPL specified in a direct transfer (call, jmp,
2507 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
2508 * it would be impossible for an exception handler or the iret
2509 * instruction to figure out whether SS:ESP are part of the frame
2510 * or not. VBox or qemu bug must've led to this misconception.
2511 *
2512 * Update2: On an AMD bulldozer system here, I've no trouble loading a null
2513 * selector into SS with an RPL other than the CPL when CPL != 3 and
2514 * we're in 64-bit mode. The Intel dev box doesn't allow this; it
2515 * insists on RPL = CPL. Weird.
2516 */
2517 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
2518 uint32_t uCpl;
2519 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2520 {
2521 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2522 {
2523 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
2524 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
2525 else
2526 {
2527 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
2528#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2529# ifdef VBOX_WITH_RAW_RING1
2530 if (pVCpu->cpum.s.fRawEntered)
2531 {
2532 if ( uCpl == 2
2533 && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)))
2534 uCpl = 1;
2535 else if (uCpl == 1)
2536 uCpl = 0;
2537 }
2538 Assert(uCpl != 2); /* ring 2 support not allowed anymore. */
2539# else
2540 if (uCpl == 1)
2541 uCpl = 0;
2542# endif
2543#endif
2544 }
2545 }
2546 else
2547 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
2548 }
2549 else
2550 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
2551 return uCpl;
2552}
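/* Editor's illustrative sketch (not part of the original source): a typical
   privilege check during instruction emulation, assuming pVCpu is the calling
   EMT's VMCPU. */
#if 0
    if (CPUMGetGuestCPL(pVCpu) != 0)
    {
        /* Not ring-0: a privileged instruction would raise #GP(0) in the guest here. */
    }
#endif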
2553
2554
2555/**
2556 * Gets the current guest CPU mode.
2557 *
2558 * If paging mode is what you need, check out PGMGetGuestMode().
2559 *
2560 * @returns The CPU mode.
2561 * @param pVCpu The cross context virtual CPU structure.
2562 */
2563VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2564{
2565 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
2566 CPUMMODE enmMode;
2567 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2568 enmMode = CPUMMODE_REAL;
2569 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2570 enmMode = CPUMMODE_PROTECTED;
2571 else
2572 enmMode = CPUMMODE_LONG;
2573
2574 return enmMode;
2575}
2576
2577
2578/**
2579 * Figures out whether the guest CPU is currently executing 16, 32 or 64-bit code.
2580 *
2581 * @returns 16, 32 or 64.
2582 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2583 */
2584VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
2585{
2586 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);
2587
2588 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2589 return 16;
2590
2591 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2592 {
2593 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2594 return 16;
2595 }
2596
2597 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2598 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2599 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2600 return 64;
2601
2602 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2603 return 32;
2604
2605 return 16;
2606}
2607
2608
2609VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
2610{
2611 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);
2612
2613 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2614 return DISCPUMODE_16BIT;
2615
2616 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2617 {
2618 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2619 return DISCPUMODE_16BIT;
2620 }
2621
2622 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2623 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2624 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2625 return DISCPUMODE_64BIT;
2626
2627 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2628 return DISCPUMODE_32BIT;
2629
2630 return DISCPUMODE_16BIT;
2631}
2632
2633
2634/**
2635 * Gets the guest MXCSR_MASK value.
2636 *
2637 * This does not access the x87 state, but the value we determined at VM
2638 * initialization.
2639 *
2640 * @returns MXCSR mask.
2641 * @param pVM The cross context VM structure.
2642 */
2643VMMDECL(uint32_t) CPUMGetGuestMxCsrMask(PVM pVM)
2644{
2645 return pVM->cpum.s.GuestInfo.fMxCsrMask;
2646}
2647
2648
2649/**
2650 * Returns whether the guest has physical interrupts enabled.
2651 *
2652 * @returns @c true if interrupts are enabled, @c false otherwise.
2653 * @param pVCpu The cross context virtual CPU structure.
2654 *
2655 * @remarks Warning! This function does -not- take into account the global-interrupt
2656 * flag (GIF).
2657 */
2658VMM_INT_DECL(bool) CPUMIsGuestPhysIntrEnabled(PVMCPU pVCpu)
2659{
2660 if (!CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest))
2661 {
2662#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2663 uint32_t const fEFlags = !pVCpu->cpum.s.fRawEntered ? pVCpu->cpum.s.Guest.eflags.u : CPUMRawGetEFlags(pVCpu);
2664#else
2665 uint32_t const fEFlags = pVCpu->cpum.s.Guest.eflags.u;
2666#endif
2667 return RT_BOOL(fEFlags & X86_EFL_IF);
2668 }
2669
2670 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest))
2671 return CPUMIsGuestVmxPhysIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
2672
2673 Assert(CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.s.Guest));
2674 return CPUMIsGuestSvmPhysIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
2675}
2676
2677
2678/**
2679 * Returns whether the nested-guest has virtual interrupts enabled.
2680 *
2681 * @returns @c true if interrupts are enabled, @c false otherwise.
2682 * @param pVCpu The cross context virtual CPU structure.
2683 *
2684 * @remarks Warning! This function does -not- take into account the global-interrupt
2685 * flag (GIF).
2686 */
2687VMM_INT_DECL(bool) CPUMIsGuestVirtIntrEnabled(PVMCPU pVCpu)
2688{
2689 Assert(CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest));
2690
2691 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest))
2692 return CPUMIsGuestVmxVirtIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
2693
2694 Assert(CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.s.Guest));
2695 return CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
2696}
2697
2698
2699/**
2700 * Calculates the interruptibility of the guest.
2701 *
2702 * @returns Interruptibility level.
2703 * @param pVCpu The cross context virtual CPU structure.
2704 */
2705VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu)
2706{
2707#if 1
2708 /* Global-interrupt flag blocks pretty much everything we care about here. */
2709 if (CPUMGetGuestGif(&pVCpu->cpum.s.Guest))
2710 {
2711 /*
2712 * Physical interrupts are primarily blocked using EFLAGS. However, we cannot access
2713 * it directly here. If and how EFLAGS are used depends on the context (nested-guest
2714 * or raw-mode). Hence we use the function below which handles the details.
2715 */
2716 if ( CPUMIsGuestPhysIntrEnabled(pVCpu)
2717 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS | VMCPU_FF_INHIBIT_INTERRUPTS))
2718 {
2719 if ( !CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest)
2720 || CPUMIsGuestVirtIntrEnabled(pVCpu))
2721 return CPUMINTERRUPTIBILITY_UNRESTRAINED;
2722
2723 /* Physical interrupts are enabled, but nested-guest virtual interrupts are disabled. */
2724 return CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED;
2725 }
2726
2727 /*
2728 * Blocking the delivery of NMIs during an interrupt shadow is CPU implementation
2729 * specific. Therefore, in practice, we can't deliver an NMI in an interrupt shadow.
2730 * However, there is some uncertainty regarding the converse, i.e. whether
2731 * NMI-blocking until IRET blocks delivery of physical interrupts.
2732 *
2733 * See Intel spec. 25.4.1 "Event Blocking".
2734 */
2735 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2736 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
2737
2738 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2739 return CPUMINTERRUPTIBILITY_INT_INHIBITED;
2740
2741 return CPUMINTERRUPTIBILITY_INT_DISABLED;
2742 }
2743 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
2744#else
2745 if (pVCpu->cpum.s.Guest.rflags.Bits.u1IF)
2746 {
2747 if (pVCpu->cpum.s.Guest.hwvirt.fGif)
2748 {
2749 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS | VMCPU_FF_INHIBIT_INTERRUPTS))
2750 return CPUMINTERRUPTIBILITY_UNRESTRAINED;
2751
2752 /** @todo does blocking NMIs mean interrupts are also inhibited? */
2753 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2754 {
2755 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2756 return CPUMINTERRUPTIBILITY_INT_INHIBITED;
2757 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
2758 }
2759 AssertFailed();
2760 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
2761 }
2762 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
2763 }
2764 else
2765 {
2766 if (pVCpu->cpum.s.Guest.hwvirt.fGif)
2767 {
2768 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2769 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
2770 return CPUMINTERRUPTIBILITY_INT_DISABLED;
2771 }
2772 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
2773 }
2774#endif
2775}
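/* Editor's illustrative sketch (not part of the original source): consuming the
   interruptibility level when deciding what may be injected, assuming pVCpu is
   the calling EMT's VMCPU. */
#if 0
    switch (CPUMGetGuestInterruptibility(pVCpu))
    {
        case CPUMINTERRUPTIBILITY_UNRESTRAINED:      /* interrupts and NMIs may be delivered */ break;
        case CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED: /* nested-guest virtual interrupts are held back */ break;
        case CPUMINTERRUPTIBILITY_INT_DISABLED:
        case CPUMINTERRUPTIBILITY_INT_INHIBITED:     /* hold back external interrupts */ break;
        case CPUMINTERRUPTIBILITY_NMI_INHIBIT:       /* hold back NMIs too */ break;
        case CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT:    /* GIF is clear, nothing gets through */ break;
        default: break;
    }
#endif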
2776
2777
2778/**
2779 * Gets whether the guest (or nested-guest) is currently blocking delivery of NMIs.
2780 *
2781 * @returns @c true if NMIs are blocked, @c false otherwise.
2782 * @param pVCpu The cross context virtual CPU structure.
2783 */
2784VMM_INT_DECL(bool) CPUMIsGuestNmiBlocking(PCVMCPU pVCpu)
2785{
2786#ifndef IN_RC
2787 /*
2788 * Return the state of guest-NMI blocking in any of the following cases:
2789 * - We're not executing a nested-guest.
2790 * - We're executing an SVM nested-guest[1].
2791 * - We're executing a VMX nested-guest without virtual-NMIs enabled.
2792 *
2793 * [1] -- SVM does not support virtual-NMIs or virtual-NMI blocking.
2794 * SVM hypervisors must track NMI blocking themselves by intercepting
2795 * the IRET instruction after injection of an NMI.
2796 */
2797 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2798 if ( !CPUMIsGuestInNestedHwvirtMode(pCtx)
2799 || CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
2800 || !CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_VIRT_NMI))
2801 return VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2802
2803 /*
2804 * Return the state of virtual-NMI blocking, if we are executing a
2805 * VMX nested-guest with virtual-NMIs enabled.
2806 */
2807 return CPUMIsGuestVmxVirtNmiBlocking(pVCpu, pCtx);
2808#else
2809 return VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2810#endif
2811}
2812
2813
2814/**
2815 * Sets blocking delivery of NMIs to the guest.
2816 *
2817 * @param pVCpu The cross context virtual CPU structure.
2818 * @param fBlock Whether NMIs are blocked or not.
2819 */
2820VMM_INT_DECL(void) CPUMSetGuestNmiBlocking(PVMCPU pVCpu, bool fBlock)
2821{
2822#ifndef IN_RC
2823 /*
2824 * Set the state of guest-NMI blocking in any of the following cases:
2825 * - We're not executing a nested-guest.
2826 * - We're executing an SVM nested-guest[1].
2827 * - We're executing a VMX nested-guest without virtual-NMIs enabled.
2828 *
2829 * [1] -- SVM does not support virtual-NMIs or virtual-NMI blocking.
2830 * SVM hypervisors must track NMI blocking themselves by intercepting
2831 * the IRET instruction after injection of an NMI.
2832 */
2833 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2834 if ( !CPUMIsGuestInNestedHwvirtMode(pCtx)
2835 || CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
2836 || !CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_VIRT_NMI))
2837 {
2838 if (fBlock)
2839 {
2840 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2841 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2842 }
2843 else
2844 {
2845 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2846 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2847 }
2848 return;
2849 }
2850
2851 /*
2852 * Set the state of virtual-NMI blocking, if we are executing a
2853 * VMX nested-guest with virtual-NMIs enabled.
2854 */
2855 return CPUMSetGuestVmxVirtNmiBlocking(pVCpu, pCtx, fBlock);
2856#else
2857 if (fBlock)
2858 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2859 else
2860 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2861#endif
2862}
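/* Editor's illustrative sketch (not part of the original source): the usual
   pairing of the two NMI-blocking helpers, assuming pVCpu is the calling EMT's
   VMCPU. */
#if 0
    CPUMSetGuestNmiBlocking(pVCpu, true);   /* after an NMI has been injected */
    /* ... the guest runs its NMI handler ... */
    CPUMSetGuestNmiBlocking(pVCpu, false);  /* when the terminating IRET is emulated */
    Assert(!CPUMIsGuestNmiBlocking(pVCpu));
#endif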
2863
2864
2865/**
2866 * Checks whether the SVM nested-guest has physical interrupts enabled.
2867 *
2868 * @returns true if interrupts are enabled, false otherwise.
2869 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2870 * @param pCtx The guest-CPU context.
2871 *
2872 * @remarks This does -not- take into account the global-interrupt flag.
2873 */
2874VMM_INT_DECL(bool) CPUMIsGuestSvmPhysIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2875{
2876 /** @todo Optimization: Avoid this function call and use a pointer to the
2877 * relevant eflags instead (setup during VMRUN instruction emulation). */
2878#ifdef IN_RC
2879 RT_NOREF2(pVCpu, pCtx);
2880 AssertReleaseFailedReturn(false);
2881#else
2882 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2883
2884 X86EFLAGS fEFlags;
2885 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))
2886 fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
2887 else
2888 fEFlags.u = pCtx->eflags.u;
2889
2890 return fEFlags.Bits.u1IF;
2891#endif
2892}
2893
2894
2895/**
2896 * Checks whether the SVM nested-guest is in a state to receive virtual
2897 * interrupts (set up for injection by the VMRUN instruction).
2898 *
2899 * @returns true if it's ready, false otherwise.
2901 *
2902 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2903 * @param pCtx The guest-CPU context.
2904 */
2905VMM_INT_DECL(bool) CPUMIsGuestSvmVirtIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2906{
2907#ifdef IN_RC
2908 RT_NOREF2(pVCpu, pCtx);
2909 AssertReleaseFailedReturn(false);
2910#else
2911 RT_NOREF(pVCpu);
2912 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2913
2914 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
2915 PCSVMINTCTRL pVmcbIntCtrl = &pVmcbCtrl->IntCtrl;
2916 Assert(!pVmcbIntCtrl->n.u1VGifEnable); /* We don't support passing virtual-GIF feature to the guest yet. */
2917 if ( !pVmcbIntCtrl->n.u1IgnoreTPR
2918 && pVmcbIntCtrl->n.u4VIntrPrio <= pVmcbIntCtrl->n.u8VTPR)
2919 return false;
2920
2921 return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
2922#endif
2923}
2924
2925
2926/**
2927 * Gets the pending SVM nested-guest interrupt vector.
2928 *
2929 * @returns The nested-guest interrupt to inject.
2930 * @param pCtx The guest-CPU context.
2931 */
2932VMM_INT_DECL(uint8_t) CPUMGetGuestSvmVirtIntrVector(PCCPUMCTX pCtx)
2933{
2934#ifdef IN_RC
2935 RT_NOREF(pCtx);
2936 AssertReleaseFailedReturn(0);
2937#else
2938 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
2939 return pVmcbCtrl->IntCtrl.n.u8VIntrVector;
2940#endif
2941}
2942
2943
2944/**
2945 * Restores the host-state from the host-state save area as part of a \#VMEXIT.
2946 *
2947 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2948 * @param pCtx The guest-CPU context.
2949 */
2950VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PVMCPU pVCpu, PCPUMCTX pCtx)
2951{
2952 /*
2953 * Reload the guest's "host state".
2954 */
2955 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2956 pCtx->es = pHostState->es;
2957 pCtx->cs = pHostState->cs;
2958 pCtx->ss = pHostState->ss;
2959 pCtx->ds = pHostState->ds;
2960 pCtx->gdtr = pHostState->gdtr;
2961 pCtx->idtr = pHostState->idtr;
2962 CPUMSetGuestEferMsrNoChecks(pVCpu, pCtx->msrEFER, pHostState->uEferMsr);
2963 CPUMSetGuestCR0(pVCpu, pHostState->uCr0 | X86_CR0_PE);
2964 pCtx->cr3 = pHostState->uCr3;
2965 CPUMSetGuestCR4(pVCpu, pHostState->uCr4);
2966 pCtx->rflags = pHostState->rflags;
2967 pCtx->rflags.Bits.u1VM = 0;
2968 pCtx->rip = pHostState->uRip;
2969 pCtx->rsp = pHostState->uRsp;
2970 pCtx->rax = pHostState->uRax;
2971 pCtx->dr[7] &= ~(X86_DR7_ENABLED_MASK | X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
2972 pCtx->dr[7] |= X86_DR7_RA1_MASK;
2973 Assert(pCtx->ss.Attr.n.u2Dpl == 0);
2974
2975 /** @todo if RIP is not canonical or outside the CS segment limit, we need to
2976 * raise \#GP(0) in the guest. */
2977
2978 /** @todo check the loaded host-state for consistency. Figure out what
2979 * exactly this involves? */
2980}
2981
2982
2983/**
2984 * Saves the host-state to the host-state save area as part of a VMRUN.
2985 *
2986 * @param pCtx The guest-CPU context.
2987 * @param cbInstr The length of the VMRUN instruction in bytes.
2988 */
2989VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr)
2990{
2991 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2992 pHostState->es = pCtx->es;
2993 pHostState->cs = pCtx->cs;
2994 pHostState->ss = pCtx->ss;
2995 pHostState->ds = pCtx->ds;
2996 pHostState->gdtr = pCtx->gdtr;
2997 pHostState->idtr = pCtx->idtr;
2998 pHostState->uEferMsr = pCtx->msrEFER;
2999 pHostState->uCr0 = pCtx->cr0;
3000 pHostState->uCr3 = pCtx->cr3;
3001 pHostState->uCr4 = pCtx->cr4;
3002 pHostState->rflags = pCtx->rflags;
3003 pHostState->uRip = pCtx->rip + cbInstr;
3004 pHostState->uRsp = pCtx->rsp;
3005 pHostState->uRax = pCtx->rax;
3006}
3007
3008
3009/**
3010 * Applies the TSC offset of a nested-guest if any and returns the TSC value for the
3011 * nested-guest.
3012 *
3013 * @returns The TSC value after applying any nested-guest TSC offset.
3014 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3015 * @param uTicks The guest TSC.
3016 *
3017 * @sa CPUMRemoveNestedGuestTscOffset.
3018 */
3019VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTicks)
3020{
3021#ifndef IN_RC
3022 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
3023 if (CPUMIsGuestInVmxNonRootMode(pCtx))
3024 {
3025 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
3026 Assert(pVmcs);
3027 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
3028 return uTicks + pVmcs->u64TscOffset.u;
3029 return uTicks;
3030 }
3031
3032 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
3033 {
3034 uint64_t u64TscOffset;
3035 if (!HMGetGuestSvmTscOffset(pVCpu, &u64TscOffset))
3036 {
3037 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
3038 Assert(pVmcb);
3039 u64TscOffset = pVmcb->ctrl.u64TSCOffset;
3040 }
3041 return uTicks + u64TscOffset;
3042 }
3043#else
3044 RT_NOREF(pVCpu);
3045#endif
3046 return uTicks;
3047}
3048
3049
3050/**
3051 * Removes the TSC offset of a nested-guest if any and returns the TSC value for the
3052 * guest.
3053 *
3054 * @returns The TSC value after removing any nested-guest TSC offset.
3055 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3056 * @param uTicks The nested-guest TSC.
3057 *
3058 * @sa CPUMApplyNestedGuestTscOffset.
3059 */
3060VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTicks)
3061{
3062#ifndef IN_RC
3063 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
3064 if (CPUMIsGuestInVmxNonRootMode(pCtx))
3065 {
3066 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
3067 {
3068 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
3069 Assert(pVmcs);
3070 return uTicks - pVmcs->u64TscOffset.u;
3071 }
3072 return uTicks;
3073 }
3074
3075 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
3076 {
3077 uint64_t u64TscOffset;
3078 if (!HMGetGuestSvmTscOffset(pVCpu, &u64TscOffset))
3079 {
3080 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
3081 Assert(pVmcb);
3082 u64TscOffset = pVmcb->ctrl.u64TSCOffset;
3083 }
3084 return uTicks - u64TscOffset;
3085 }
3086#else
3087 RT_NOREF(pVCpu);
3088#endif
3089 return uTicks;
3090}
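/* Editor's illustrative sketch (not part of the original source): the two TSC
   helpers undo each other while the same nested-guest TSC offset is in effect;
   uGuestTsc is a hypothetical raw guest TSC value. */
#if 0
    uint64_t const uNstGstTsc = CPUMApplyNestedGuestTscOffset(pVCpu, uGuestTsc);
    Assert(CPUMRemoveNestedGuestTscOffset(pVCpu, uNstGstTsc) == uGuestTsc);
#endif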
3091
3092
3093/**
3094 * Dynamically imports state residing in NEM or HM.
3095 *
3096 * This is a worker for the CPUM_IMPORT_EXTRN_RET() macro and various IEM ones.
3097 *
3098 * @returns VBox status code.
3099 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3100 * @param fExtrnImport The fields to import.
3101 * @thread EMT(pVCpu)
3102 */
3103VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPU pVCpu, uint64_t fExtrnImport)
3104{
3105 VMCPU_ASSERT_EMT(pVCpu);
3106 if (pVCpu->cpum.s.Guest.fExtrn & fExtrnImport)
3107 {
3108#ifndef IN_RC
3109 switch (pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_KEEPER_MASK)
3110 {
3111 case CPUMCTX_EXTRN_KEEPER_NEM:
3112 {
3113 int rc = NEMImportStateOnDemand(pVCpu, fExtrnImport);
3114 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
3115 return rc;
3116 }
3117
3118 case CPUMCTX_EXTRN_KEEPER_HM:
3119 {
3120#ifdef IN_RING0
3121 int rc = HMR0ImportStateOnDemand(pVCpu, fExtrnImport);
3122 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
3123 return rc;
3124#else
3125 AssertLogRelMsgFailed(("TODO Fetch HM state: %#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport));
3126 return VINF_SUCCESS;
3127#endif
3128 }
3129 default:
3130 AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
3131 }
3132#else
3133 AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
3134#endif
3135 }
3136 return VINF_SUCCESS;
3137}
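/* Editor's illustrative sketch (not part of the original source): fetching a
   register lazily before using it, assuming pVCpu is the calling EMT's VMCPU
   and the surrounding code returns a VBox status code. */
#if 0
    if (pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_CR4)
    {
        int rc = CPUMImportGuestStateOnDemand(pVCpu, CPUMCTX_EXTRN_CR4);
        AssertRCReturn(rc, rc);
    }
    uint64_t const uGuestCr4 = pVCpu->cpum.s.Guest.cr4; /* now safe to read */
#endif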
3138
3139
3140/**
3141 * Gets valid CR4 bits for the guest.
3142 *
3143 * @returns Valid CR4 bits.
3144 * @param pVM The cross context VM structure.
3145 */
3146VMM_INT_DECL(uint64_t) CPUMGetGuestCR4ValidMask(PVM pVM)
3147{
3148 PCCPUMFEATURES pGuestFeatures = &pVM->cpum.s.GuestFeatures;
3149 uint64_t fMask = X86_CR4_VME | X86_CR4_PVI
3150 | X86_CR4_TSD | X86_CR4_DE
3151 | X86_CR4_PSE | X86_CR4_PAE
3152 | X86_CR4_MCE | X86_CR4_PGE
3153 | X86_CR4_PCE
3154 | X86_CR4_OSXMMEEXCPT; /** @todo r=ramshankar: Introduced in Pentium III along with SSE. Check fSse here? */
3155 if (pGuestFeatures->fFxSaveRstor)
3156 fMask |= X86_CR4_OSFXSR;
3157 if (pGuestFeatures->fVmx)
3158 fMask |= X86_CR4_VMXE;
3159 if (pGuestFeatures->fXSaveRstor)
3160 fMask |= X86_CR4_OSXSAVE;
3161 if (pGuestFeatures->fPcid)
3162 fMask |= X86_CR4_PCIDE;
3163 if (pGuestFeatures->fFsGsBase)
3164 fMask |= X86_CR4_FSGSBASE;
3165 return fMask;
3166}
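/* Editor's illustrative sketch (not part of the original source): validating a
   guest MOV-to-CR4; uNewCr4 is a hypothetical value being written by the guest. */
#if 0
    if (uNewCr4 & ~CPUMGetGuestCR4ValidMask(pVM))
    {
        /* Reserved or unsupported bit set: raise #GP(0) in the guest. */
    }
#endif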
3167
3168
3169/**
3170 * Gets the read and write permission bits for an MSR in an MSR bitmap.
3171 *
3172 * @returns VMXMSRPM_XXX - the MSR permission.
3173 * @param pvMsrBitmap Pointer to the MSR bitmap.
3174 * @param idMsr The MSR to get permissions for.
3175 *
3176 * @sa hmR0VmxSetMsrPermission.
3177 */
3178VMM_INT_DECL(uint32_t) CPUMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr)
3179{
3180 AssertPtrReturn(pvMsrBitmap, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR);
3181
3182 uint8_t const * const pbMsrBitmap = (uint8_t const * const)pvMsrBitmap;
3183
3184 /*
3185 * MSR Layout:
3186 * Byte index MSR range Interpreted as
3187 * 0x000 - 0x3ff 0x00000000 - 0x00001fff Low MSR read bits.
3188 * 0x400 - 0x7ff 0xc0000000 - 0xc0001fff High MSR read bits.
3189 * 0x800 - 0xbff 0x00000000 - 0x00001fff Low MSR write bits.
3190 * 0xc00 - 0xfff 0xc0000000 - 0xc0001fff High MSR write bits.
3191 *
3192 * A bit corresponding to an MSR within the above range causes a VM-exit
3193 * if the bit is 1 on executions of RDMSR/WRMSR. If an MSR falls out of
3194 * the MSR range, it always causes a VM-exit.
3195 *
3196 * See Intel spec. 24.6.9 "MSR-Bitmap Address".
3197 */
3198 uint32_t const offBitmapRead = 0;
3199 uint32_t const offBitmapWrite = 0x800;
3200 uint32_t offMsr;
3201 uint32_t iBit;
3202 if (idMsr <= UINT32_C(0x00001fff))
3203 {
3204 offMsr = 0;
3205 iBit = idMsr;
3206 }
3207 else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
3208 {
3209 offMsr = 0x400;
3210 iBit = idMsr - UINT32_C(0xc0000000);
3211 }
3212 else
3213 {
3214 LogFunc(("Warning! Out of range MSR %#RX32\n", idMsr));
3215 return VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR;
3216 }
3217
3218 /*
3219 * Get the MSR read permissions.
3220 */
3221 uint32_t fRet;
3222 uint32_t const offMsrRead = offBitmapRead + offMsr;
3223 Assert(offMsrRead + (iBit >> 3) < offBitmapWrite);
3224 if (ASMBitTest(pbMsrBitmap + offMsrRead, iBit))
3225 fRet = VMXMSRPM_EXIT_RD;
3226 else
3227 fRet = VMXMSRPM_ALLOW_RD;
3228
3229 /*
3230 * Get the MSR write permissions.
3231 */
3232 uint32_t const offMsrWrite = offBitmapWrite + offMsr;
3233 Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE);
3234 if (ASMBitTest(pbMsrBitmap + offMsrWrite, iBit))
3235 fRet |= VMXMSRPM_EXIT_WR;
3236 else
3237 fRet |= VMXMSRPM_ALLOW_WR;
3238
3239 Assert(VMXMSRPM_IS_FLAG_VALID(fRet));
3240 return fRet;
3241}
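/* Editor's illustrative sketch (not part of the original source): MSR 0xc0000080
   (EFER) falls in the high range, so its read bit lives at byte 0x400 + (0x80 >> 3)
   = 0x410, bit 0, and its write bit at 0xc00 + 0x10 = 0xc10, bit 0; pvMsrBitmap is
   a hypothetical 4 KB MSR bitmap. */
#if 0
    uint32_t const fPerm = CPUMGetVmxMsrPermission(pvMsrBitmap, 0xc0000080);
    if (fPerm & VMXMSRPM_EXIT_RD)
    {
        /* RDMSR of EFER by the nested-guest causes a VM-exit. */
    }
#endif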
3242
3243
3244/**
3245 * Gets the permission bits for the specified I/O port from the given I/O bitmaps.
3246 *
3247 * @returns @c true if the I/O port access must cause a VM-exit, @c false otherwise.
3248 * @param pvIoBitmapA Pointer to I/O bitmap A.
3249 * @param pvIoBitmapB Pointer to I/O bitmap B.
3250 * @param uPort The I/O port being accessed.
3251 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
3252 */
3253VMM_INT_DECL(bool) CPUMGetVmxIoBitmapPermission(void const *pvIoBitmapA, void const *pvIoBitmapB, uint16_t uPort,
3254 uint8_t cbAccess)
3255{
3256 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
3257
3258 /*
3259 * If the I/O port access wraps around the 16-bit port I/O space,
3260 * we must cause a VM-exit.
3261 *
3262 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3263 */
3264 /** @todo r=ramshankar: Reading 1, 2, 4 bytes at ports 0xffff, 0xfffe and 0xfffc
3265 * respectively is valid and does not constitute a wrap around from what I
3266 * understand. Verify this later. */
3267 uint32_t const uPortLast = uPort + cbAccess;
3268 if (uPortLast > 0x10000)
3269 return true;
3270
3271 /* Read the appropriate bit from the corresponding IO bitmap. */
3272 void const *pvIoBitmap = uPort < 0x8000 ? pvIoBitmapA : pvIoBitmapB;
3273 return ASMBitTest(pvIoBitmap, uPort);
3274}
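/* Editor's illustrative sketch (not part of the original source): ports below
   0x8000 are looked up in bitmap A, the rest in bitmap B, and accesses that wrap
   past 0xffff always VM-exit; pvIoBitmapA/B are hypothetical 4 KB bitmaps. */
#if 0
    bool const fIntercept = CPUMGetVmxIoBitmapPermission(pvIoBitmapA, pvIoBitmapB, 0x80 /*uPort*/, 1 /*cbAccess*/);
    Assert(CPUMGetVmxIoBitmapPermission(pvIoBitmapA, pvIoBitmapB, 0xfffe /*uPort*/, 4 /*cbAccess*/)); /* wraps */
#endif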
3275
3276
3277/**
3278 * Returns whether the given VMCS field is valid and supported for the guest.
3279 *
3280 * @param pVM The cross context VM structure.
3281 * @param u64VmcsField The VMCS field.
3282 *
3283 * @remarks This takes into account the CPU features exposed to the guest.
3284 */
3285VMM_INT_DECL(bool) CPUMIsGuestVmxVmcsFieldValid(PVM pVM, uint64_t u64VmcsField)
3286{
3287#ifndef IN_RC
3288 uint32_t const uFieldEncHi = RT_HI_U32(u64VmcsField);
3289 uint32_t const uFieldEncLo = RT_LO_U32(u64VmcsField);
3290 if (!uFieldEncHi)
3291 { /* likely */ }
3292 else
3293 return false;
3294
3295 PCCPUMFEATURES pFeat = &pVM->cpum.s.GuestFeatures;
3296 switch (uFieldEncLo)
3297 {
3298 /*
3299 * 16-bit fields.
3300 */
3301 /* Control fields. */
3302 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
3303 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
3304 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
3305
3306 /* Guest-state fields. */
3307 case VMX_VMCS16_GUEST_ES_SEL:
3308 case VMX_VMCS16_GUEST_CS_SEL:
3309 case VMX_VMCS16_GUEST_SS_SEL:
3310 case VMX_VMCS16_GUEST_DS_SEL:
3311 case VMX_VMCS16_GUEST_FS_SEL:
3312 case VMX_VMCS16_GUEST_GS_SEL:
3313 case VMX_VMCS16_GUEST_LDTR_SEL:
3314 case VMX_VMCS16_GUEST_TR_SEL: return true;
3315 case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
3316 case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
3317
3318 /* Host-state fields. */
3319 case VMX_VMCS16_HOST_ES_SEL:
3320 case VMX_VMCS16_HOST_CS_SEL:
3321 case VMX_VMCS16_HOST_SS_SEL:
3322 case VMX_VMCS16_HOST_DS_SEL:
3323 case VMX_VMCS16_HOST_FS_SEL:
3324 case VMX_VMCS16_HOST_GS_SEL:
3325 case VMX_VMCS16_HOST_TR_SEL: return true;
3326
3327 /*
3328 * 64-bit fields.
3329 */
3330 /* Control fields. */
3331 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
3332 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
3333 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
3334 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
3335 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
3336 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
3337 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
3338 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
3339 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
3340 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
3341 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
3342 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
3343 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
3344 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
3345 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
3346 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
3347 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
3348 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
3349 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
3350 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
3351 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
3352 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
3353 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
3354 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
3355 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
3356 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
3357 case VMX_VMCS64_CTRL_EPTP_FULL:
3358 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
3359 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
3360 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
3361 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
3362 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
3363 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
3364 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
3365 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
3366 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
3367 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
3368 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
3369 {
3370 PCVMCPU pVCpu = &pVM->aCpus[0];
3371 uint64_t const uVmFuncMsr = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64VmFunc;
3372 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
3373 }
3374 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
3375 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
3376 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
3377 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
3378 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_FULL:
3379 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
3380 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
3381 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
3382 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL:
3383 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH: return false;
3384 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
3385 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
3386
3387 /* Read-only data fields. */
3388 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
3389 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
3390
3391 /* Guest-state fields. */
3392 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
3393 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
3394 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
3395 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
3396 case VMX_VMCS64_GUEST_PAT_FULL:
3397 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
3398 case VMX_VMCS64_GUEST_EFER_FULL:
3399 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
3400 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
3401 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH: return false;
3402 case VMX_VMCS64_GUEST_PDPTE0_FULL:
3403 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
3404 case VMX_VMCS64_GUEST_PDPTE1_FULL:
3405 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
3406 case VMX_VMCS64_GUEST_PDPTE2_FULL:
3407 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
3408 case VMX_VMCS64_GUEST_PDPTE3_FULL:
3409 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
3410 case VMX_VMCS64_GUEST_BNDCFGS_FULL:
3411 case VMX_VMCS64_GUEST_BNDCFGS_HIGH: return false;
3412
3413 /* Host-state fields. */
3414 case VMX_VMCS64_HOST_PAT_FULL:
3415 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
3416 case VMX_VMCS64_HOST_EFER_FULL:
3417 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
3418 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
3419 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH: return false;
3420
3421 /*
3422 * 32-bit fields.
3423 */
3424 /* Control fields. */
3425 case VMX_VMCS32_CTRL_PIN_EXEC:
3426 case VMX_VMCS32_CTRL_PROC_EXEC:
3427 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
3428 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
3429 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
3430 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
3431 case VMX_VMCS32_CTRL_EXIT:
3432 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
3433 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
3434 case VMX_VMCS32_CTRL_ENTRY:
3435 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
3436 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
3437 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
3438 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
3439 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
3440 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
3441 case VMX_VMCS32_CTRL_PLE_GAP:
3442 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
3443
3444 /* Read-only data fields. */
3445 case VMX_VMCS32_RO_VM_INSTR_ERROR:
3446 case VMX_VMCS32_RO_EXIT_REASON:
3447 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
3448 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
3449 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
3450 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
3451 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
3452 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
3453
3454 /* Guest-state fields. */
3455 case VMX_VMCS32_GUEST_ES_LIMIT:
3456 case VMX_VMCS32_GUEST_CS_LIMIT:
3457 case VMX_VMCS32_GUEST_SS_LIMIT:
3458 case VMX_VMCS32_GUEST_DS_LIMIT:
3459 case VMX_VMCS32_GUEST_FS_LIMIT:
3460 case VMX_VMCS32_GUEST_GS_LIMIT:
3461 case VMX_VMCS32_GUEST_LDTR_LIMIT:
3462 case VMX_VMCS32_GUEST_TR_LIMIT:
3463 case VMX_VMCS32_GUEST_GDTR_LIMIT:
3464 case VMX_VMCS32_GUEST_IDTR_LIMIT:
3465 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
3466 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
3467 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
3468 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
3469 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
3470 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
3471 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
3472 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
3473 case VMX_VMCS32_GUEST_INT_STATE:
3474 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
3475 case VMX_VMCS32_GUEST_SMBASE:
3476 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
3477 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
3478
3479 /* Host-state fields. */
3480 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
3481
3482 /*
3483 * Natural-width fields.
3484 */
3485 /* Control fields. */
3486 case VMX_VMCS_CTRL_CR0_MASK:
3487 case VMX_VMCS_CTRL_CR4_MASK:
3488 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
3489 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
3490 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
3491 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
3492 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
3493 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
3494
3495 /* Read-only data fields. */
3496 case VMX_VMCS_RO_EXIT_QUALIFICATION:
3497 case VMX_VMCS_RO_IO_RCX:
3498 case VMX_VMCS_RO_IO_RSI:
3499 case VMX_VMCS_RO_IO_RDI:
3500 case VMX_VMCS_RO_IO_RIP:
3501 case VMX_VMCS_RO_GUEST_LINEAR_ADDR: return true;
3502
3503 /* Guest-state fields. */
3504 case VMX_VMCS_GUEST_CR0:
3505 case VMX_VMCS_GUEST_CR3:
3506 case VMX_VMCS_GUEST_CR4:
3507 case VMX_VMCS_GUEST_ES_BASE:
3508 case VMX_VMCS_GUEST_CS_BASE:
3509 case VMX_VMCS_GUEST_SS_BASE:
3510 case VMX_VMCS_GUEST_DS_BASE:
3511 case VMX_VMCS_GUEST_FS_BASE:
3512 case VMX_VMCS_GUEST_GS_BASE:
3513 case VMX_VMCS_GUEST_LDTR_BASE:
3514 case VMX_VMCS_GUEST_TR_BASE:
3515 case VMX_VMCS_GUEST_GDTR_BASE:
3516 case VMX_VMCS_GUEST_IDTR_BASE:
3517 case VMX_VMCS_GUEST_DR7:
3518 case VMX_VMCS_GUEST_RSP:
3519 case VMX_VMCS_GUEST_RIP:
3520 case VMX_VMCS_GUEST_RFLAGS:
3521 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
3522 case VMX_VMCS_GUEST_SYSENTER_ESP:
3523 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
3524
3525 /* Host-state fields. */
3526 case VMX_VMCS_HOST_CR0:
3527 case VMX_VMCS_HOST_CR3:
3528 case VMX_VMCS_HOST_CR4:
3529 case VMX_VMCS_HOST_FS_BASE:
3530 case VMX_VMCS_HOST_GS_BASE:
3531 case VMX_VMCS_HOST_TR_BASE:
3532 case VMX_VMCS_HOST_GDTR_BASE:
3533 case VMX_VMCS_HOST_IDTR_BASE:
3534 case VMX_VMCS_HOST_SYSENTER_ESP:
3535 case VMX_VMCS_HOST_SYSENTER_EIP:
3536 case VMX_VMCS_HOST_RSP:
3537 case VMX_VMCS_HOST_RIP: return true;
3538 }
3539
3540 return false;
3541#else
3542 RT_NOREF2(pVM, u64VmcsField);
3543 return false;
3544#endif
3545}
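/* Editor's illustrative sketch (not part of the original source): rejecting
   VMREAD/VMWRITE of fields the guest CPU profile does not expose; u64VmcsField is
   a hypothetical field encoding taken from the emulated instruction. */
#if 0
    if (!CPUMIsGuestVmxVmcsFieldValid(pVM, u64VmcsField))
    {
        /* Fail the instruction with "unsupported VMCS component". */
    }
#endif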
3546
3547
3548/**
3549 * Checks whether the given I/O access should cause a nested-guest VM-exit.
3550 *
3551 * @returns @c true if it causes a VM-exit, @c false otherwise.
3552 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3553 * @param u16Port The I/O port being accessed.
3554 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
3555 */
3556VMM_INT_DECL(bool) CPUMIsGuestVmxIoInterceptSet(PCVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess)
3557{
3558#ifndef IN_RC
3559 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
3560 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_UNCOND_IO_EXIT))
3561 return true;
3562
3563 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_USE_IO_BITMAPS))
3564 {
3565 uint8_t const *pbIoBitmapA = (uint8_t const *)pCtx->hwvirt.vmx.CTX_SUFF(pvIoBitmap);
3566 uint8_t const *pbIoBitmapB = (uint8_t const *)pCtx->hwvirt.vmx.CTX_SUFF(pvIoBitmap) + VMX_V_IO_BITMAP_A_SIZE;
3567 Assert(pbIoBitmapA);
3568 Assert(pbIoBitmapB);
3569 return CPUMGetVmxIoBitmapPermission(pbIoBitmapA, pbIoBitmapB, u16Port, cbAccess);
3570 }
3571
3572 return false;
3573#else
3574 RT_NOREF3(pVCpu, u16Port, cbAccess);
3575 return false;
3576#endif
3577}
3578
3579
3580/**
3581 * Checks whether the Mov-to-CR3 instruction causes a nested-guest VM-exit.
3582 *
3583 * @returns @c true if it causes a VM-exit, @c false otherwise.
3584 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3585 * @param uNewCr3 The CR3 value being written.
3586 */
3587VMM_INT_DECL(bool) CPUMIsGuestVmxMovToCr3InterceptSet(PVMCPU pVCpu, uint64_t uNewCr3)
3588{
3589#ifndef IN_RC
3590 /*
3591 * If the CR3-load exiting control is set and the new CR3 value does not
3592 * match any of the CR3-target values in the VMCS, we must cause a VM-exit.
3593 *
3594 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3595 */
3596 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
3597 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
3598 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_CR3_LOAD_EXIT))
3599 {
3600 uint32_t const uCr3TargetCount = pVmcs->u32Cr3TargetCount;
3601 Assert(uCr3TargetCount <= VMX_V_CR3_TARGET_COUNT);
3602
3603 /* If the CR3-target count is 0, cause a VM-exit. */
3604 if (uCr3TargetCount == 0)
3605 return true;
3606
3607 /* If the CR3 being written doesn't match any of the target values, cause a VM-exit. */
3608 AssertCompile(VMX_V_CR3_TARGET_COUNT == 4);
3609 if ( uNewCr3 != pVmcs->u64Cr3Target0.u
3610 && uNewCr3 != pVmcs->u64Cr3Target1.u
3611 && uNewCr3 != pVmcs->u64Cr3Target2.u
3612 && uNewCr3 != pVmcs->u64Cr3Target3.u)
3613 return true;
3614 }
3615 return false;
3616#else
3617 RT_NOREF2(pVCpu, uNewCr3);
3618 return false;
3619#endif
3620}
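/* Editor's illustrative sketch (not part of the original source): use during
   MOV-to-CR3 emulation; uNewCr3 is the hypothetical value being loaded. */
#if 0
    if (CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCr3))
    {
        /* Reflect a Mov-to-CR3 VM-exit to the nested-guest instead of loading CR3. */
    }
#endif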
3621
3622
3623/**
3624 * Checks whether a VMREAD or VMWRITE instruction for the given VMCS field causes a
3625 * VM-exit or not.
3626 *
3627 * @returns @c true if the VMREAD/VMWRITE is intercepted, @c false otherwise.
3628 * @param pVCpu The cross context virtual CPU structure.
3629 * @param uExitReason The VM-exit reason (VMX_EXIT_VMREAD or
3630 * VMX_EXIT_VMWRITE).
3631 * @param u64VmcsField The VMCS field.
3632 */
3633VMM_INT_DECL(bool) CPUMIsGuestVmxVmreadVmwriteInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint64_t u64VmcsField)
3634{
3635#ifndef IN_RC
3636 Assert(CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest));
3637 Assert( uExitReason == VMX_EXIT_VMREAD
3638 || uExitReason == VMX_EXIT_VMWRITE);
3639
3640 /*
3641 * Without VMCS shadowing, all VMREAD and VMWRITE instructions are intercepted.
3642 */
3643 if (!CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.s.Guest, VMX_PROC_CTLS2_VMCS_SHADOWING))
3644 return true;
3645
3646 /*
3647 * If any reserved bit in the 64-bit VMCS field encoding is set, the VMREAD/VMWRITE
3648 * is intercepted. This excludes any reserved bits in the valid parts of the field
3649 * encoding (i.e. bit 12).
3650 */
3651 if (u64VmcsField & VMX_VMCSFIELD_RSVD_MASK)
3652 return true;
3653
3654 /*
3655 * Finally, consult the VMREAD/VMWRITE bitmap whether to intercept the instruction or not.
3656 */
3657 uint32_t const u32VmcsField = RT_LO_U32(u64VmcsField);
3658 uint8_t const *pbBitmap = uExitReason == VMX_EXIT_VMREAD
3659 ? (uint8_t const *)pVCpu->cpum.s.Guest.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap)
3660 : (uint8_t const *)pVCpu->cpum.s.Guest.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap);
3661 Assert(pbBitmap);
3662 Assert(u32VmcsField >> 3 < VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
3663 return ASMBitTest(pbBitmap + (u32VmcsField >> 3), u32VmcsField & 7);
3664#else
3665 RT_NOREF3(pVCpu, uExitReason, u64VmcsField);
3666 return false;
3667#endif
3668}
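/*
 * Illustrative example of the VMREAD/VMWRITE bitmap lookup above: for a field
 * encoding of 0x681e (the guest RIP field per the Intel SDM), the byte offset is
 * 0x681e >> 3 = 0xd03 and the bit offset is 0x681e & 7 = 6; if that bit is set in
 * the corresponding bitmap, the VMREAD/VMWRITE is intercepted.
 */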
3669
3670
3671
3672/**
3673 * Determines whether the given I/O access should cause a nested-guest \#VMEXIT.
3674 *
 * @returns @c true if the I/O access causes a \#VMEXIT, @c false otherwise.
3675 * @param pvIoBitmap Pointer to the nested-guest IO bitmap.
3676 * @param u16Port The IO port being accessed.
3677 * @param enmIoType The type of IO access.
3678 * @param cbReg The IO operand size in bytes.
3679 * @param cAddrSizeBits The address size in bits (16, 32 or 64).
3680 * @param iEffSeg The effective segment number.
3681 * @param fRep Whether this is a repeating IO instruction (REP prefix).
3682 * @param fStrIo Whether this is a string IO instruction.
3683 * @param pIoExitInfo Pointer to the SVMIOIOEXITINFO struct to be filled.
3684 * Optional, can be NULL.
3685 */
3686VMM_INT_DECL(bool) CPUMIsSvmIoInterceptSet(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
3687 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
3688 PSVMIOIOEXITINFO pIoExitInfo)
3689{
3690 Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
3691 Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
3692
3693 /*
3694 * The IOPM layout:
3695 * Each bit represents one 8-bit port. That makes a total of 65536 bits (ports
3696 * 0..65535), i.e. two 4K pages.
3697 *
3698 * For IO instructions that access more than a single byte, the permission bits
3699 * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.
3700 *
3701 * Since it's possible to do a 32-bit IO access at port 65534 (accessing 4 bytes),
3702 * we need 3 extra bits beyond the second 4K page.
3703 */
3704 static const uint16_t s_auSizeMasks[] = { 0, 1, 3, 0, 0xf, 0, 0, 0 };
3705
3706 uint16_t const offIopm = u16Port >> 3;
3707 uint16_t const fSizeMask = s_auSizeMasks[(cAddrSizeBits >> SVM_IOIO_OP_SIZE_SHIFT) & 7];
3708 uint8_t const cShift = u16Port - (offIopm << 3);
3709 uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift);
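 /*
  * Illustrative example of the offset math above: for u16Port = 0x3fe and a 4-byte
  * size mask (0xf), offIopm = 0x3fe >> 3 = 0x7f, cShift = 0x3fe - 0x3f8 = 6 and
  * fIopmMask = 0x3c0 (bits 6..9).  The mask can straddle into the following byte,
  * which is why a 16-bit load is done from pbIopm below.
  */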
3710
3711 uint8_t const *pbIopm = (uint8_t *)pvIoBitmap;
3712 Assert(pbIopm);
3713 pbIopm += offIopm;
3714 uint16_t const u16Iopm = *(uint16_t *)pbIopm;
3715 if (u16Iopm & fIopmMask)
3716 {
3717 if (pIoExitInfo)
3718 {
3719 static const uint32_t s_auIoOpSize[] =
3720 { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };
3721
3722 static const uint32_t s_auIoAddrSize[] =
3723 { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };
3724
3725 pIoExitInfo->u = s_auIoOpSize[cbReg & 7];
3726 pIoExitInfo->u |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
3727 pIoExitInfo->n.u1Str = fStrIo;
3728 pIoExitInfo->n.u1Rep = fRep;
3729 pIoExitInfo->n.u3Seg = iEffSeg & 7;
3730 pIoExitInfo->n.u1Type = enmIoType;
3731 pIoExitInfo->n.u16Port = u16Port;
3732 }
3733 return true;
3734 }
3735
3736 /** @todo remove later (for debugging as VirtualBox always traps all IO
3737 * intercepts). */
3738 AssertMsgFailed(("CPUMSvmIsIOInterceptActive: We expect an IO intercept here!\n"));
3739 return false;
3740}
3741
3742
3743/**
3744 * Gets the MSR permission bitmap byte and bit offset for the specified MSR.
3745 *
3746 * @returns VBox status code.
3747 * @param idMsr The MSR being requested.
3748 * @param pbOffMsrpm Where to store the byte offset in the MSR permission
3749 * bitmap for @a idMsr.
3750 * @param puMsrpmBit Where to store the bit offset starting at the byte
3751 * returned in @a pbOffMsrpm.
3752 */
3753VMM_INT_DECL(int) CPUMGetSvmMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit)
3754{
3755 Assert(pbOffMsrpm);
3756 Assert(puMsrpmBit);
3757
3758 /*
3759 * MSRPM Layout:
3760 * Byte offset MSR range
3761 * 0x000 - 0x7ff 0x00000000 - 0x00001fff
3762 * 0x800 - 0xfff 0xc0000000 - 0xc0001fff
3763 * 0x1000 - 0x17ff 0xc0010000 - 0xc0011fff
3764 * 0x1800 - 0x1fff Reserved
3765 *
3766 * Each MSR is represented by 2 permission bits (read and write).
3767 */
3768 if (idMsr <= 0x00001fff)
3769 {
3770 /* Pentium-compatible MSRs. */
3771 uint32_t const bitoffMsr = idMsr << 1;
3772 *pbOffMsrpm = bitoffMsr >> 3;
3773 *puMsrpmBit = bitoffMsr & 7;
3774 return VINF_SUCCESS;
3775 }
3776
3777 if ( idMsr >= 0xc0000000
3778 && idMsr <= 0xc0001fff)
3779 {
3780 /* AMD Sixth Generation x86 Processor MSRs. */
3781 uint32_t const bitoffMsr = (idMsr - 0xc0000000) << 1;
3782 *pbOffMsrpm = 0x800 + (bitoffMsr >> 3);
3783 *puMsrpmBit = bitoffMsr & 7;
3784 return VINF_SUCCESS;
3785 }
3786
3787 if ( idMsr >= 0xc0010000
3788 && idMsr <= 0xc0011fff)
3789 {
3790 /* AMD Seventh and Eighth Generation Processor MSRs. */
3791 uint32_t const bitoffMsr = (idMsr - 0xc0010000) << 1;
3792 *pbOffMsrpm = 0x1000 + (bitoffMsr >> 3);
3793 *puMsrpmBit = bitoffMsr & 7;
3794 return VINF_SUCCESS;
3795 }
3796
3797 *pbOffMsrpm = 0;
3798 *puMsrpmBit = 0;
3799 return VERR_OUT_OF_RANGE;
3800}
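/*
 * Illustrative use of CPUMGetSvmMsrpmOffsetAndBit: for the EFER MSR (0xc0000080,
 * MSR_K6_EFER in the VBox headers) the bit offset is (0x80 << 1) = 0x100, giving a
 * byte offset of 0x800 + 0x20 = 0x820 and a bit offset of 0.  Per the AMD spec the
 * lower of the two permission bits controls read intercepts and the higher bit
 * controls write intercepts, so a caller would test bit 0 of byte 0x820 for RDMSR
 * interception and bit 1 for WRMSR interception of EFER.
 */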
3801