VirtualBox

source: vbox/trunk/src/VBox/VMM/include/HMInternal.h@47740

Last change on this file since 47740 was 47718, checked in by vboxsync, 12 years ago

More single stepping work.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 34.8 KB
 
/* $Id: HMInternal.h 47718 2013-08-14 10:33:22Z vboxsync $ */
/** @file
 * HM - Internal header file.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef ___HMInternal_h
#define ___HMInternal_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/stam.h>
#include <VBox/dis.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/cpum.h>
#include <iprt/memobj.h>
#include <iprt/cpuset.h>
#include <iprt/mp.h>
#include <iprt/avl.h>

#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined(VBOX_WITH_64_BITS_GUESTS)
/* Enable 64-bit guest support. */
# define VBOX_ENABLE_64_BITS_GUESTS
#endif

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
# define VMX_USE_CACHED_VMCS_ACCESSES
#endif

/** @def HM_PROFILE_EXIT_DISPATCH
 * Enables profiling of the VM exit handler dispatching. */
#if 0
# define HM_PROFILE_EXIT_DISPATCH
#endif

/* MSR auto load/store used to not work for the KERNEL_GS_BASE MSR, so we used
 * to handle that MSR manually. See @bugref{6208}. The problem was clearly
 * visible while booting Solaris 11 (11.1 b19) VMs with 2 CPUs. This is no
 * longer the case and we always auto load/store the KERNEL_GS_BASE MSR.
 *
 * Note: don't forget to update the assembly files while modifying this!
 */
/** @todo This define should always be in effect and the define itself removed
 *        after 'sufficient' testing. */
#define VBOX_WITH_AUTO_MSR_LOAD_RESTORE

RT_C_DECLS_BEGIN


/** @defgroup grp_hm_int Internal
 * @ingroup grp_hm
 * @internal
 * @{
 */


/** Maximum number of exit reason statistics counters. */
#define MAX_EXITREASON_STAT                 0x100
#define MASK_EXITREASON_STAT                0xff
#define MASK_INJECT_IRQ_STAT                0xff

/** @name HM changed flags.
 * These flags are used to keep track of which important registers have
 * changed since they were last reset.
 * @{
 */
#define HM_CHANGED_GUEST_CR0                RT_BIT(0)
#define HM_CHANGED_GUEST_CR3                RT_BIT(1)
#define HM_CHANGED_GUEST_CR4                RT_BIT(2)
#define HM_CHANGED_GUEST_GDTR               RT_BIT(3)
#define HM_CHANGED_GUEST_IDTR               RT_BIT(4)
#define HM_CHANGED_GUEST_LDTR               RT_BIT(5)
#define HM_CHANGED_GUEST_TR                 RT_BIT(6)
#define HM_CHANGED_GUEST_SEGMENT_REGS       RT_BIT(7)
#define HM_CHANGED_GUEST_DEBUG              RT_BIT(8)
#define HM_CHANGED_GUEST_RIP                RT_BIT(9)
#define HM_CHANGED_GUEST_RSP                RT_BIT(10)
#define HM_CHANGED_GUEST_RFLAGS             RT_BIT(11)
#define HM_CHANGED_GUEST_CR2                RT_BIT(12)
#define HM_CHANGED_GUEST_SYSENTER_CS_MSR    RT_BIT(13)
#define HM_CHANGED_GUEST_SYSENTER_EIP_MSR   RT_BIT(14)
#define HM_CHANGED_GUEST_SYSENTER_ESP_MSR   RT_BIT(15)
/* VT-x specific state. */
#define HM_CHANGED_VMX_GUEST_AUTO_MSRS      RT_BIT(16)
#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE RT_BIT(17)
#define HM_CHANGED_VMX_GUEST_APIC_STATE     RT_BIT(18)
#define HM_CHANGED_VMX_ENTRY_CTLS           RT_BIT(19)
#define HM_CHANGED_VMX_EXIT_CTLS            RT_BIT(20)
/* AMD-V specific state. */
#define HM_CHANGED_SVM_GUEST_EFER_MSR       RT_BIT(16)
#define HM_CHANGED_SVM_GUEST_APIC_STATE     RT_BIT(17)
#define HM_CHANGED_SVM_RESERVED1            RT_BIT(18)
#define HM_CHANGED_SVM_RESERVED2            RT_BIT(19)
#define HM_CHANGED_SVM_RESERVED3            RT_BIT(20)

#define HM_CHANGED_ALL_GUEST                (  HM_CHANGED_GUEST_CR0 \
                                             | HM_CHANGED_GUEST_CR3 \
                                             | HM_CHANGED_GUEST_CR4 \
                                             | HM_CHANGED_GUEST_GDTR \
                                             | HM_CHANGED_GUEST_IDTR \
                                             | HM_CHANGED_GUEST_LDTR \
                                             | HM_CHANGED_GUEST_TR \
                                             | HM_CHANGED_GUEST_SEGMENT_REGS \
                                             | HM_CHANGED_GUEST_DEBUG \
                                             | HM_CHANGED_GUEST_RIP \
                                             | HM_CHANGED_GUEST_RSP \
                                             | HM_CHANGED_GUEST_RFLAGS \
                                             | HM_CHANGED_GUEST_CR2 \
                                             | HM_CHANGED_GUEST_SYSENTER_CS_MSR \
                                             | HM_CHANGED_GUEST_SYSENTER_EIP_MSR \
                                             | HM_CHANGED_GUEST_SYSENTER_ESP_MSR \
                                             | HM_CHANGED_VMX_GUEST_AUTO_MSRS \
                                             | HM_CHANGED_VMX_GUEST_ACTIVITY_STATE \
                                             | HM_CHANGED_VMX_GUEST_APIC_STATE \
                                             | HM_CHANGED_VMX_ENTRY_CTLS \
                                             | HM_CHANGED_VMX_EXIT_CTLS)

#define HM_CHANGED_HOST_CONTEXT             RT_BIT(21)
/** @} */
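
/* Illustrative sketch, not part of the original header: the HM_CHANGED_* bits
 * accumulate in HMCPU::fContextUseFlags (declared further down) and are used
 * roughly as below. The helper names are hypothetical. */
#if 0
/** Marks guest CR0 as dirty so it is reloaded into the VMCS/VMCB on next entry. */
DECLINLINE(void) hmExampleSetCr0Dirty(PVMCPU pVCpu)
{
    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
}

/** Tests and clears the CR0 dirty bit while loading the guest state. */
DECLINLINE(bool) hmExampleConsumeCr0Dirty(PVMCPU pVCpu)
{
    if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0))
        return false;
    pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
    return true;
}
#endif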

/** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
#define HM_MAX_TLB_SHOOTDOWN_PAGES      8

/** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
#define HM_EPT_IDENTITY_PG_TABLE_SIZE   PAGE_SIZE
/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
#define HM_VTX_TSS_SIZE                 (sizeof(VBOXTSS) + 2 * PAGE_SIZE + 1)
/** Total guest mapped memory needed. */
#define HM_VTX_TOTAL_DEVHEAP_MEM        (HM_EPT_IDENTITY_PG_TABLE_SIZE + HM_VTX_TSS_SIZE)

/** Enable for TPR guest patching. */
#define VBOX_HM_WITH_GUEST_PATCHING

/** HM SSM version. */
#ifdef VBOX_HM_WITH_GUEST_PATCHING
# define HM_SSM_VERSION                 5
# define HM_SSM_VERSION_NO_PATCHING     4
#else
# define HM_SSM_VERSION                 4
# define HM_SSM_VERSION_NO_PATCHING     4
#endif
#define HM_SSM_VERSION_2_0_X            3

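/* Illustrative sketch, not part of the original header: a saved-state load
 * callback would typically gate on the versions above along these lines
 * (hypothetical helper; VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION is the stock
 * status code for this situation). */
#if 0
static int hmExampleCheckSsmVersion(uint32_t uVersion)
{
    if (   uVersion != HM_SSM_VERSION
        && uVersion != HM_SSM_VERSION_NO_PATCHING
        && uVersion != HM_SSM_VERSION_2_0_X)
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    return VINF_SUCCESS;
}
#endif
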
/**
 * Global per-CPU information (host).
 */
typedef struct HMGLOBLCPUINFO
{
    /** The CPU ID. */
    RTCPUID             idCpu;
    /** The memory object. */
    RTR0MEMOBJ          hMemObj;
    /** Current ASID (AMD-V) / VPID (Intel). */
    uint32_t            uCurrentAsid;
    /** TLB flush count. */
    uint32_t            cTlbFlushes;
    /** Whether to flush each new ASID/VPID before use. */
    bool                fFlushAsidBeforeUse;
    /** Configured for VT-x or AMD-V. */
    bool                fConfigured;
    /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
    bool                fIgnoreAMDVInUseError;
    /** In use by our code (for power suspend). */
    volatile bool       fInUse;
} HMGLOBLCPUINFO;
/** Pointer to the per-CPU global information. */
typedef HMGLOBLCPUINFO *PHMGLOBLCPUINFO;

typedef enum
{
    HMPENDINGIO_INVALID = 0,
    HMPENDINGIO_PORT_READ,
    HMPENDINGIO_PORT_WRITE,
    HMPENDINGIO_STRING_READ,
    HMPENDINGIO_STRING_WRITE,
    /** The usual 32-bit paranoia. */
    HMPENDINGIO_32BIT_HACK = 0x7fffffff
} HMPENDINGIO;


typedef enum
{
    HMTPRINSTR_INVALID,
    HMTPRINSTR_READ,
    HMTPRINSTR_READ_SHR4,
    HMTPRINSTR_WRITE_REG,
    HMTPRINSTR_WRITE_IMM,
    HMTPRINSTR_JUMP_REPLACEMENT,
    /** The usual 32-bit paranoia. */
    HMTPRINSTR_32BIT_HACK = 0x7fffffff
} HMTPRINSTR;

typedef struct
{
    /** The key is the address of the patched instruction (32-bit GC pointer). */
    AVLOU32NODECORE     Core;
    /** Original opcode. */
    uint8_t             aOpcode[16];
    /** Instruction size. */
    uint32_t            cbOp;
    /** Replacement opcode. */
    uint8_t             aNewOpcode[16];
    /** Replacement instruction size. */
    uint32_t            cbNewOp;
    /** Instruction type. */
    HMTPRINSTR          enmType;
    /** Source operand. */
    uint32_t            uSrcOperand;
    /** Destination operand. */
    uint32_t            uDstOperand;
    /** Number of times the instruction caused a fault. */
    uint32_t            cFaults;
    /** Patch address of the jump replacement. */
    RTGCPTR32           pJumpTarget;
} HMTPRPATCH;
/** Pointer to HMTPRPATCH. */
typedef HMTPRPATCH *PHMTPRPATCH;
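
/* Illustrative sketch, not part of the original header: HMTPRPATCH::Core.Key
 * holds the 32-bit guest address of the patched instruction, so a patch record
 * is found with the ordinary IPRT AVL getter (hypothetical helper). */
#if 0
static PHMTPRPATCH hmExampleFindPatch(PVM pVM, RTGCPTR32 GCPtrInstr)
{
    return (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)GCPtrInstr);
}
#endif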

/**
 * Switcher function, HC to the special 64-bit RC.
 *
 * @param   pVM             Pointer to the VM.
 * @param   offCpumVCpu     Offset from pVM->cpum to pVM->aCpus[idCpu].cpum.
 * @returns Return code indicating the action to take.
 */
typedef DECLCALLBACK(int) FNHMSWITCHERHC(PVM pVM, uint32_t offCpumVCpu);
/** Pointer to switcher function. */
typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;

/**
 * HM VM Instance data.
 * Changes to this must be checked against the padding of the hm union in VM!
 */
typedef struct HM
{
    /** Set when we've initialized VMX or SVM. */
    bool                        fInitialized;

    /** Set if nested paging is enabled. */
    bool                        fNestedPaging;

    /** Set if nested paging is allowed. */
    bool                        fAllowNestedPaging;

    /** Set if large pages are enabled (requires nested paging). */
    bool                        fLargePages;

    /** Set if we can support 64-bit guests. */
    bool                        fAllow64BitGuests;

    /** Set if an IO-APIC is configured for this VM. */
    bool                        fHasIoApic;

    /** Set when TPR patching is allowed. */
    bool                        fTRPPatchingAllowed;

    /** Set when we initialize VT-x or AMD-V once for all CPUs. */
    bool                        fGlobalInit;

    /** Set when TPR patching is active. */
    bool                        fTPRPatchingActive;
    bool                        u8Alignment[7];

    /** Maximum ASID allowed. */
    uint32_t                    uMaxAsid;

    /** The maximum number of resume loops allowed in ring-0 (safety precaution).
     * This number is set much higher when RTThreadPreemptIsPending is reliable. */
    uint32_t                    cMaxResumeLoops;

    /** Guest memory allocated for patching purposes. */
    RTGCPTR                     pGuestPatchMem;
    /** Current free pointer inside the patch block. */
    RTGCPTR                     pFreeGuestPatchMem;
    /** Size of the guest patch memory block. */
    uint32_t                    cbGuestPatchMem;
    uint32_t                    uPadding1;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    /** 32-bit to 64-bit switcher entry point. */
    R0PTRTYPE(PFNHMSWITCHERHC)  pfnHost32ToGuest64R0;
    RTR0PTR                     uPadding2;
#endif

    struct
    {
        /** Set by the ring-0 side of HM to indicate VMX is supported by the
         *  CPU. */
        bool                        fSupported;

        /** Set when we've enabled VMX. */
        bool                        fEnabled;

        /** Set if VPID is supported. */
        bool                        fVpid;

        /** Set if VT-x VPID is allowed. */
        bool                        fAllowVpid;

        /** Set if unrestricted guest execution is in use (real and protected mode without paging). */
        bool                        fUnrestrictedGuest;

        /** Set if unrestricted guest execution is allowed to be used. */
        bool                        fAllowUnrestricted;

        /** Whether we're using the preemption timer or not. */
        bool                        fUsePreemptTimer;
        /** The shift mask employed by the VMX-preemption timer. */
        uint8_t                     cPreemptTimerShift;

        /** Virtual address of the TSS page used for real mode emulation. */
        R3PTRTYPE(PVBOXTSS)         pRealModeTSS;

        /** Virtual address of the identity page table used for real mode and protected mode without paging emulation in EPT mode. */
        R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;

        /** R0 memory object for the APIC-access page. */
        RTR0MEMOBJ                  hMemObjApicAccess;
        /** Physical address of the APIC-access page. */
        RTHCPHYS                    HCPhysApicAccess;
        /** Virtual address of the APIC-access page. */
        R0PTRTYPE(uint8_t *)        pbApicAccess;

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
        RTR0MEMOBJ                  hMemObjScratch;
        RTHCPHYS                    HCPhysScratch;
        R0PTRTYPE(uint8_t *)        pbScratch;
#endif

        /** Internal ID of which flush-handler to use for tagged-TLB entries. */
        unsigned                    uFlushTaggedTlb;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
        uint32_t                    u32Alignment;
#endif
        /** Host CR4 value (set by ring-0 VMX init). */
        uint64_t                    hostCR4;

        /** Host EFER value (set by ring-0 VMX init). */
        uint64_t                    hostEFER;

        /** VMX MSR values. */
        struct
        {
            uint64_t                feature_ctrl;
            uint64_t                vmx_basic_info;
            VMX_CAPABILITY          vmx_pin_ctls;
            VMX_CAPABILITY          vmx_proc_ctls;
            VMX_CAPABILITY          vmx_proc_ctls2;
            VMX_CAPABILITY          vmx_exit;
            VMX_CAPABILITY          vmx_entry;
            uint64_t                vmx_misc;
            uint64_t                vmx_cr0_fixed0;
            uint64_t                vmx_cr0_fixed1;
            uint64_t                vmx_cr4_fixed0;
            uint64_t                vmx_cr4_fixed1;
            uint64_t                vmx_vmcs_enum;
            uint64_t                vmx_vmfunc;
            uint64_t                vmx_ept_vpid_caps;
        } msr;

        /** Flush types for invept & invvpid; they depend on capabilities. */
        VMX_FLUSH_EPT               enmFlushEpt;
        VMX_FLUSH_VPID              enmFlushVpid;
    } vmx;

    struct
    {
        /** Set by the ring-0 side of HM to indicate SVM is supported by the
         *  CPU. */
        bool                        fSupported;
        /** Set when we've enabled SVM. */
        bool                        fEnabled;
        /** Set if erratum 170 affects the AMD CPU. */
        bool                        fAlwaysFlushTLB;
        /** Set when the hack to ignore VERR_SVM_IN_USE is active. */
        bool                        fIgnoreInUseError;

        /** R0 memory object for the IO bitmap (12 KB). */
        RTR0MEMOBJ                  hMemObjIOBitmap;
        /** Physical address of the IO bitmap (12 KB). */
        RTHCPHYS                    HCPhysIOBitmap;
        /** Virtual address of the IO bitmap. */
        R0PTRTYPE(void *)           pvIOBitmap;

        /** HWCR MSR (for diagnostics). */
        uint64_t                    msrHwcr;

        /** SVM revision. */
        uint32_t                    u32Rev;

        /** SVM feature bits from cpuid 0x8000000a. */
        uint32_t                    u32Features;
    } svm;

    /**
     * AVL tree with all patches (active or disabled) sorted by guest instruction address.
     */
    AVLOU32TREE                 PatchTree;
    uint32_t                    cPatches;
    HMTPRPATCH                  aPatches[64];

    struct
    {
        uint32_t                    u32AMDFeatureECX;
        uint32_t                    u32AMDFeatureEDX;
    } cpuid;

    /** Saved error from detection. */
    int32_t                     lLastError;

    /** Set when HMR0Init has been run. */
    bool                        fHMR0Init;
    bool                        u8Alignment1[7];

    STAMCOUNTER                 StatTprPatchSuccess;
    STAMCOUNTER                 StatTprPatchFailure;
    STAMCOUNTER                 StatTprReplaceSuccess;
    STAMCOUNTER                 StatTprReplaceFailure;
} HM;
/** Pointer to HM VM instance data. */
typedef HM *PHM;

/* Maximum number of cached entries. */
#define VMCSCACHE_MAX_ENTRY             128

/* Structure for storing read and write VMCS actions. */
typedef struct VMCSCACHE
{
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    /* Magic marker for searching in crash dumps. */
    uint8_t             aMagic[16];
    uint64_t            uMagic;
    uint64_t            u64TimeEntry;
    uint64_t            u64TimeSwitch;
    uint64_t            cResume;
    uint64_t            interPD;
    uint64_t            pSwitcher;
    uint32_t            uPos;
    uint32_t            idCpu;
#endif
    /* CR2 is saved here for EPT syncing. */
    uint64_t            cr2;
    struct
    {
        uint32_t        cValidEntries;
        uint32_t        uAlignment;
        uint32_t        aField[VMCSCACHE_MAX_ENTRY];
        uint64_t        aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Write;
    struct
    {
        uint32_t        cValidEntries;
        uint32_t        uAlignment;
        uint32_t        aField[VMCSCACHE_MAX_ENTRY];
        uint64_t        aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Read;
#ifdef VBOX_STRICT
    struct
    {
        RTHCPHYS        HCPhysCpuPage;
        RTHCPHYS        HCPhysVmcs;
        RTGCPTR         pCache;
        RTGCPTR         pCtx;
    } TestIn;
    struct
    {
        RTHCPHYS        HCPhysVmcs;
        RTGCPTR         pCache;
        RTGCPTR         pCtx;
        uint64_t        eflags;
        uint64_t        cr8;
    } TestOut;
    struct
    {
        uint64_t        param1;
        uint64_t        param2;
        uint64_t        param3;
        uint64_t        param4;
    } ScratchPad;
#endif
} VMCSCACHE;
/** Pointer to VMCSCACHE. */
typedef VMCSCACHE *PVMCSCACHE;
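
/* Illustrative sketch, not part of the original header: when
 * VMX_USE_CACHED_VMCS_ACCESSES is in effect, a VMCS write is queued in the
 * Write sub-structure instead of issuing VMWRITE directly, and the cache is
 * replayed by the 64-bit switcher (hypothetical helper). */
#if 0
static int hmExampleCacheVmcsWrite(PVMCSCACHE pCache, uint32_t idxField, uint64_t u64Val)
{
    uint32_t const iEntry = pCache->Write.cValidEntries;
    if (iEntry >= VMCSCACHE_MAX_ENTRY)
        return VERR_TOO_MUCH_DATA;          /* Cache exhausted. */
    pCache->Write.aField[iEntry]    = idxField;
    pCache->Write.aFieldVal[iEntry] = u64Val;
    pCache->Write.cValidEntries     = iEntry + 1;
    return VINF_SUCCESS;
}
#endif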

/** VMX StartVM function. */
typedef DECLCALLBACK(int) FNHMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
/** Pointer to a VMX StartVM function. */
typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM;

/** SVM VMRun function. */
typedef DECLCALLBACK(int) FNHMSVMVMRUN(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
/** Pointer to a SVM VMRun function. */
typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;
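
/* Illustrative sketch, not part of the original header: ring-0 reaches the
 * actual world switch through these pointers so that the right assembly
 * variant (32/64-bit, XMM-saving wrapper) can be picked once at init time.
 * pCtx is assumed to be in scope. */
#if 0
    int rc = pVCpu->hm.s.vmx.pfnStartVM(pVCpu->hm.s.fResumeVM, pCtx,
                                        &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
#endif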

/**
 * HM VMCPU Instance data.
 */
typedef struct HMCPU
{
    /** Set if we don't have to flush the TLB on VM entry. */
    bool                        fResumeVM;
    /** Set if we need to flush the TLB during the world switch. */
    bool                        fForceTLBFlush;
    /** Set when we're using VT-x or AMD-V at that moment. */
    bool                        fActive;
    /** Set when the TLB has been checked; stays set until we return from the
     *  world switch. */
    volatile bool               fCheckedTLBFlush;
    /** Whether we're executing a single instruction. */
    bool                        fSingleInstruction;
    /** Set if we need to clear the trap flag because of single stepping. */
    bool                        fClearTrapFlag;
    uint8_t                     abAlignment[2];

    /** World switch exit counter. */
    volatile uint32_t           cWorldSwitchExits;
    /** HM_CHANGED_* flags. */
    uint32_t                    fContextUseFlags;
    /** ID of the last CPU we were executing code on (NIL_RTCPUID for the first
     *  time). */
    RTCPUID                     idLastCpu;
    /** TLB flush count. */
    uint32_t                    cTlbFlushes;
    /** Current ASID in use by the VM. */
    uint32_t                    uCurrentAsid;
    /** An additional error code used for some guru meditations. */
    uint32_t                    u32HMError;

    /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
    uint64_t                    u64HostTscAux;

    struct
    {
        /** Physical address of the VM control structure (VMCS). */
        RTHCPHYS                    HCPhysVmcs;
        /** R0 memory object for the VM control structure (VMCS). */
        RTR0MEMOBJ                  hMemObjVmcs;
        /** Virtual address of the VM control structure (VMCS). */
        R0PTRTYPE(void *)           pvVmcs;
        /** Ring-0 handlers for VT-x. */
        PFNHMVMXSTARTVM             pfnStartVM;
#if HC_ARCH_BITS == 32
        uint32_t                    u32Alignment1;
#endif

        /** Current VMX_VMCS32_CTRL_PIN_EXEC. */
        uint32_t                    u32PinCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC. */
        uint32_t                    u32ProcCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC2. */
        uint32_t                    u32ProcCtls2;
        /** Current VMX_VMCS32_CTRL_EXIT. */
        uint32_t                    u32ExitCtls;
        /** Current VMX_VMCS32_CTRL_ENTRY. */
        uint32_t                    u32EntryCtls;
        /** Physical address of the virtual APIC page for TPR caching. */
        RTHCPHYS                    HCPhysVirtApic;
        /** R0 memory object for the virtual APIC page for TPR caching. */
        RTR0MEMOBJ                  hMemObjVirtApic;
        /** Virtual address of the virtual APIC page for TPR caching. */
        R0PTRTYPE(uint8_t *)        pbVirtApic;
#if HC_ARCH_BITS == 32
        uint32_t                    u32Alignment2;
#endif

        /** Current CR0 mask. */
        uint32_t                    u32CR0Mask;
        /** Current CR4 mask. */
        uint32_t                    u32CR4Mask;
        /** Current exception bitmap. */
        uint32_t                    u32XcptBitmap;
        /** The updated-guest-state mask. */
        uint32_t                    fUpdatedGuestState;
        /** Current EPTP. */
        RTHCPHYS                    HCPhysEPTP;

        /** Physical address of the MSR bitmap. */
        RTHCPHYS                    HCPhysMsrBitmap;
        /** R0 memory object for the MSR bitmap. */
        RTR0MEMOBJ                  hMemObjMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pvMsrBitmap;

#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
        /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used
         *  for guest MSRs). */
        RTHCPHYS                    HCPhysGuestMsr;
        /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area
         *  (used for guest MSRs). */
        RTR0MEMOBJ                  hMemObjGuestMsr;
        /** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area (used
         *  for guest MSRs). */
        R0PTRTYPE(void *)           pvGuestMsr;

        /** Physical address of the VM-exit MSR-load area (used for host MSRs). */
        RTHCPHYS                    HCPhysHostMsr;
        /** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */
        RTR0MEMOBJ                  hMemObjHostMsr;
        /** Virtual address of the VM-exit MSR-load area (used for host MSRs). */
        R0PTRTYPE(void *)           pvHostMsr;

        /** Number of automatically loaded/restored guest MSRs during the world switch. */
        uint32_t                    cGuestMsrs;
        uint32_t                    uAlignment;
#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

        /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */
        uint64_t                    u64MsrApicBase;
        /** Last used TSC offset value (cached). */
        uint64_t                    u64TSCOffset;
        /** VMCS cache. */
        VMCSCACHE                   VMCSCache;

        /** Real-mode emulation state. */
        struct
        {
            X86DESCATTR             uAttrCS;
            X86DESCATTR             uAttrDS;
            X86DESCATTR             uAttrES;
            X86DESCATTR             uAttrFS;
            X86DESCATTR             uAttrGS;
            X86DESCATTR             uAttrSS;
            X86EFLAGS               eflags;
            uint32_t                fRealOnV86Active;
        } RealMode;

        struct
        {
            uint64_t                u64VMCSPhys;
            uint32_t                u32VMCSRevision;
            uint32_t                u32InstrError;
            uint32_t                u32ExitReason;
            RTCPUID                 idEnteredCpu;
            RTCPUID                 idCurrentCpu;
            uint32_t                padding;
        } LastError;

        /** Which host-state bits to restore before being preempted. */
        uint32_t                    fRestoreHostFlags;
        /** The host-state restoration structure. */
        VMXRESTOREHOST              RestoreHost;
        /** Set if guest was executing in real mode (extra checks). */
        bool                        fWasInRealMode;
    } vmx;

    struct
    {
        /** R0 memory object for the host VMCB which holds additional host-state. */
        RTR0MEMOBJ                  hMemObjVmcbHost;
        /** Physical address of the host VMCB which holds additional host-state. */
        RTHCPHYS                    HCPhysVmcbHost;
        /** Virtual address of the host VMCB which holds additional host-state. */
        R0PTRTYPE(void *)           pvVmcbHost;

        /** R0 memory object for the guest VMCB. */
        RTR0MEMOBJ                  hMemObjVmcb;
        /** Physical address of the guest VMCB. */
        RTHCPHYS                    HCPhysVmcb;
        /** Virtual address of the guest VMCB. */
        R0PTRTYPE(void *)           pvVmcb;

        /** Ring-0 handlers for AMD-V. */
        PFNHMSVMVMRUN               pfnVMRun;

        /** R0 memory object for the MSR bitmap (8 KB). */
        RTR0MEMOBJ                  hMemObjMsrBitmap;
        /** Physical address of the MSR bitmap (8 KB). */
        RTHCPHYS                    HCPhysMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pvMsrBitmap;

        /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
         *  we should check if the VTPR changed on every VM-exit. */
        bool                        fSyncVTpr;
        uint8_t                     u8Align[7];

        /** Alignment padding. */
        uint32_t                    u32Padding;
    } svm;

    /** Event injection state. */
    struct
    {
        uint32_t                    fPending;
        uint32_t                    u32ErrCode;
        uint32_t                    cbInstr;
        uint32_t                    u32Padding; /**< Explicit alignment padding. */
        uint64_t                    u64IntrInfo;
        RTGCUINTPTR                 GCPtrFaultAddress;
    } Event;

    /** I/O block emulation state. */
    struct
    {
        bool                        fEnabled;
        uint8_t                     u8Align[7];

        /** RIP at the start of the I/O code we wish to emulate in the recompiler. */
        RTGCPTR                     GCPtrFunctionEip;

        uint64_t                    cr0;
    } EmulateIoBlock;

    struct
    {
        /** Pending I/O operation type. */
        HMPENDINGIO                 enmType;
        uint32_t                    uPadding;
        RTGCPTR                     GCPtrRip;
        RTGCPTR                     GCPtrRipNext;
        union
        {
            struct
            {
                uint32_t            uPort;
                uint32_t            uAndVal;
                uint32_t            cbSize;
            } Port;
            uint64_t                aRaw[2];
        } s;
    } PendingIO;

    /** The PAE PDPEs used with Nested Paging (only valid when
     *  VMCPU_FF_HM_UPDATE_PAE_PDPES is set). */
    X86PDPE                     aPdpes[4];

    /** Current shadow paging mode. */
    PGMMODE                     enmShadowMode;

    /** The CPU ID of the CPU currently owning the VMCS. Set in
     *  HMR0Enter and cleared in HMR0Leave. */
    RTCPUID                     idEnteredCpu;

    /** To keep track of pending TLB shootdown pages (SMP guests only). */
    struct
    {
        RTGCPTR                     aPages[HM_MAX_TLB_SHOOTDOWN_PAGES];
        uint32_t                    cPages;
        uint32_t                    u32Padding; /**< Explicit alignment padding. */
    } TlbShootdown;

    /** To save stack space, the disassembler state is allocated here instead of
     *  on the stack. */
    DISCPUSTATE                 DisState;

    STAMPROFILEADV              StatEntry;
    STAMPROFILEADV              StatExit1;
    STAMPROFILEADV              StatExit2;
    STAMPROFILEADV              StatExitIO;
    STAMPROFILEADV              StatExitMovCRx;
    STAMPROFILEADV              StatExitXcptNmi;
    STAMPROFILEADV              StatLoadGuestState;
    STAMPROFILEADV              StatInGC;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMPROFILEADV              StatWorldSwitch3264;
#endif
    STAMPROFILEADV              StatPoke;
    STAMPROFILEADV              StatSpinPoke;
    STAMPROFILEADV              StatSpinPokeFailed;

    STAMCOUNTER                 StatInjectInterrupt;
    STAMCOUNTER                 StatInjectXcpt;
    STAMCOUNTER                 StatInjectPendingReflect;

    STAMCOUNTER                 StatExitShadowNM;
    STAMCOUNTER                 StatExitGuestNM;
    STAMCOUNTER                 StatExitShadowPF;   /* Misleading, currently used for MMIO #PFs as well. */
    STAMCOUNTER                 StatExitShadowPFEM;
    STAMCOUNTER                 StatExitGuestPF;
    STAMCOUNTER                 StatExitGuestUD;
    STAMCOUNTER                 StatExitGuestSS;
    STAMCOUNTER                 StatExitGuestNP;
    STAMCOUNTER                 StatExitGuestGP;
    STAMCOUNTER                 StatExitGuestDE;
    STAMCOUNTER                 StatExitGuestDB;
    STAMCOUNTER                 StatExitGuestMF;
    STAMCOUNTER                 StatExitGuestBP;
    STAMCOUNTER                 StatExitGuestXF;
    STAMCOUNTER                 StatExitGuestXcpUnk;
    STAMCOUNTER                 StatExitInvlpg;
    STAMCOUNTER                 StatExitInvd;
    STAMCOUNTER                 StatExitWbinvd;
    STAMCOUNTER                 StatExitPause;
    STAMCOUNTER                 StatExitCpuid;
    STAMCOUNTER                 StatExitRdtsc;
    STAMCOUNTER                 StatExitRdtscp;
    STAMCOUNTER                 StatExitRdpmc;
    STAMCOUNTER                 StatExitRdrand;
    STAMCOUNTER                 StatExitCli;
    STAMCOUNTER                 StatExitSti;
    STAMCOUNTER                 StatExitPushf;
    STAMCOUNTER                 StatExitPopf;
    STAMCOUNTER                 StatExitIret;
    STAMCOUNTER                 StatExitInt;
    STAMCOUNTER                 StatExitCRxWrite[16];
    STAMCOUNTER                 StatExitCRxRead[16];
    STAMCOUNTER                 StatExitDRxWrite;
    STAMCOUNTER                 StatExitDRxRead;
    STAMCOUNTER                 StatExitRdmsr;
    STAMCOUNTER                 StatExitWrmsr;
    STAMCOUNTER                 StatExitClts;
    STAMCOUNTER                 StatExitXdtrAccess;
    STAMCOUNTER                 StatExitHlt;
    STAMCOUNTER                 StatExitMwait;
    STAMCOUNTER                 StatExitMonitor;
    STAMCOUNTER                 StatExitLmsw;
    STAMCOUNTER                 StatExitIOWrite;
    STAMCOUNTER                 StatExitIORead;
    STAMCOUNTER                 StatExitIOStringWrite;
    STAMCOUNTER                 StatExitIOStringRead;
    STAMCOUNTER                 StatExitIntWindow;
    STAMCOUNTER                 StatExitMaxResume;
    STAMCOUNTER                 StatExitExtInt;
    STAMCOUNTER                 StatExitHostNmi;
    STAMCOUNTER                 StatExitPreemptTimer;
    STAMCOUNTER                 StatExitTprBelowThreshold;
    STAMCOUNTER                 StatExitTaskSwitch;
    STAMCOUNTER                 StatExitMtf;
    STAMCOUNTER                 StatExitApicAccess;
    STAMCOUNTER                 StatPendingHostIrq;

    STAMCOUNTER                 StatFlushPage;
    STAMCOUNTER                 StatFlushPageManual;
    STAMCOUNTER                 StatFlushPhysPageManual;
    STAMCOUNTER                 StatFlushTlb;
    STAMCOUNTER                 StatFlushTlbManual;
    STAMCOUNTER                 StatFlushTlbWorldSwitch;
    STAMCOUNTER                 StatNoFlushTlbWorldSwitch;
    STAMCOUNTER                 StatFlushEntire;
    STAMCOUNTER                 StatFlushAsid;
    STAMCOUNTER                 StatFlushNestedPaging;
    STAMCOUNTER                 StatFlushTlbInvlpgVirt;
    STAMCOUNTER                 StatFlushTlbInvlpgPhys;
    STAMCOUNTER                 StatTlbShootdown;
    STAMCOUNTER                 StatTlbShootdownFlush;

    STAMCOUNTER                 StatSwitchGuestIrq;
    STAMCOUNTER                 StatSwitchHmToR3FF;
    STAMCOUNTER                 StatSwitchExitToR3;
    STAMCOUNTER                 StatSwitchLongJmpToR3;

    STAMCOUNTER                 StatTscOffset;
    STAMCOUNTER                 StatTscIntercept;
    STAMCOUNTER                 StatTscInterceptOverFlow;

    STAMCOUNTER                 StatExitReasonNpf;
    STAMCOUNTER                 StatDRxArmed;
    STAMCOUNTER                 StatDRxContextSwitch;
    STAMCOUNTER                 StatDRxIoCheck;

    STAMCOUNTER                 StatLoadMinimal;
    STAMCOUNTER                 StatLoadFull;

    STAMCOUNTER                 StatVmxCheckBadRmSelBase;
    STAMCOUNTER                 StatVmxCheckBadRmSelLimit;
    STAMCOUNTER                 StatVmxCheckRmOk;

    STAMCOUNTER                 StatVmxCheckBadSel;
    STAMCOUNTER                 StatVmxCheckBadRpl;
    STAMCOUNTER                 StatVmxCheckBadLdt;
    STAMCOUNTER                 StatVmxCheckBadTr;
    STAMCOUNTER                 StatVmxCheckPmOk;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMCOUNTER                 StatFpu64SwitchBack;
    STAMCOUNTER                 StatDebug64SwitchBack;
#endif

#ifdef VBOX_WITH_STATISTICS
    R3PTRTYPE(PSTAMCOUNTER)     paStatExitReason;
    R0PTRTYPE(PSTAMCOUNTER)     paStatExitReasonR0;
    R3PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqs;
    R0PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqsR0;
#endif
#ifdef HM_PROFILE_EXIT_DISPATCH
    STAMPROFILEADV              StatExitDispatch;
#endif
} HMCPU;
/** Pointer to HM VMCPU instance data. */
typedef HMCPU *PHMCPU;
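
/* Illustrative sketch, not part of the original header: queueing a page for
 * TLB shootdown on an SMP guest; once HM_MAX_TLB_SHOOTDOWN_PAGES is reached
 * the whole TLB is flushed instead via the force-action flag (hypothetical
 * helper). */
#if 0
static void hmExampleQueueShootdownPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
{
    uint32_t const iPage = pVCpu->hm.s.TlbShootdown.cPages;
    if (iPage < HM_MAX_TLB_SHOOTDOWN_PAGES)
    {
        pVCpu->hm.s.TlbShootdown.aPages[iPage] = GCPtrPage;
        pVCpu->hm.s.TlbShootdown.cPages        = iPage + 1;
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
    }
    else
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
}
#endif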


#ifdef IN_RING0

VMMR0DECL(PHMGLOBLCPUINFO) HMR0GetCurrentCpu(void);
VMMR0DECL(PHMGLOBLCPUINFO) HMR0GetCurrentCpuEx(RTCPUID idCpu);


#ifdef VBOX_STRICT
VMMR0DECL(void) HMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0DECL(void) HMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
#else
# define HMDumpRegs(a, b, c)            do { } while (0)
# define HMR0DumpDescriptor(a, b, c)    do { } while (0)
#endif

# ifdef VBOX_WITH_KERNEL_USING_XMM
DECLASM(int) HMR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
# endif

# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
/**
 * Gets the 64-bit GDTR and IDTR on darwin.
 * @param   pGdtr   Where to store the 64-bit GDTR.
 * @param   pIdtr   Where to store the 64-bit IDTR.
 */
DECLASM(void) HMR0Get64bitGdtrAndIdtr(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);

/**
 * Gets the 64-bit CR3 on darwin.
 * @returns CR3
 */
DECLASM(uint64_t) HMR0Get64bitCR3(void);
# endif

#endif /* IN_RING0 */

/** @} */

RT_C_DECLS_END

#endif
