VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EMHM.cpp@ 47786

最後變更 在這個檔案從47786是 47671,由 vboxsync 提交於 12 年 前

VMM: More debugging related stuff.

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Author Date Id Revision
檔案大小: 24.3 KB
 
1/* $Id: EMHM.cpp 47671 2013-08-12 11:16:55Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager - hardware virtualization
4 */
5
6/*
7 * Copyright (C) 2006-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_EM
22#include <VBox/vmm/em.h>
23#include <VBox/vmm/vmm.h>
24#include <VBox/vmm/csam.h>
25#include <VBox/vmm/selm.h>
26#include <VBox/vmm/trpm.h>
27#include <VBox/vmm/iem.h>
28#include <VBox/vmm/iom.h>
29#include <VBox/vmm/dbgf.h>
30#include <VBox/vmm/pgm.h>
31#ifdef VBOX_WITH_REM
32# include <VBox/vmm/rem.h>
33#endif
34#include <VBox/vmm/tm.h>
35#include <VBox/vmm/mm.h>
36#include <VBox/vmm/ssm.h>
37#include <VBox/vmm/pdmapi.h>
38#include <VBox/vmm/pdmcritsect.h>
39#include <VBox/vmm/pdmqueue.h>
40#include <VBox/vmm/hm.h>
41#include "EMInternal.h"
42#include <VBox/vmm/vm.h>
43#include <VBox/vmm/cpumdis.h>
44#include <VBox/dis.h>
45#include <VBox/disopcode.h>
46#include <VBox/vmm/dbgf.h>
47#include "VMMTracing.h"
48
49#include <iprt/asm.h>
50
51
52/*******************************************************************************
53* Defined Constants And Macros *
54*******************************************************************************/
55#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
56#define EM_NOTIFY_HM
57#endif
58
59
60/*******************************************************************************
61* Internal Functions *
62*******************************************************************************/
63DECLINLINE(int) emR3ExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC = VINF_SUCCESS);
64static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu);
65static int emR3HmForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
66
67#define EMHANDLERC_WITH_HM
68#include "EMHandleRCTmpl.h"
69
70
#if defined(DEBUG) && defined(SOME_UNUSED_FUNCTIONS)

/**
 * Steps hardware accelerated mode.
 *
 * Executes a single guest instruction by setting TF (and RF) in the guest
 * EFLAGS, running the guest until the resulting trap, and clearing TF again.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 */
static int emR3HmStep(PVM pVM, PVMCPU pVCpu)
{
    Assert(pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM);

    int rc;
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
# ifdef VBOX_WITH_RAW_MODE
    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS));
# endif

    /*
     * Check vital forced actions, but ignore pending interrupts and timers.
     */
    if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
        || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
    {
        rc = emR3HmForcedActions(pVM, pVCpu, pCtx);
        if (rc != VINF_SUCCESS)
            return rc;
    }
    /*
     * Set flags for single stepping.
     */
    CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) | X86_EFL_TF | X86_EFL_RF);

    /*
     * Single step.
     * We do not start time or anything, if anything we should just do a few nanoseconds.
     */
    do
    {
        rc = VMMR3HmRunGC(pVM, pVCpu);
    } while (   rc == VINF_SUCCESS
             || rc == VINF_EM_RAW_INTERRUPT);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);

    /*
     * Make sure the trap flag is cleared.
     * (Too bad if the guest is trying to single step too.)
     */
    CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);

    /*
     * Deal with the return codes.
     */
    rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
    rc = emR3HmHandleRC(pVM, pVCpu, pCtx, rc);
    return rc;
}


/**
 * Single-steps up to @a cIterations instructions in HM mode, disassembling and
 * logging each one, for debugger-driven stepping.
 *
 * @returns VINF_EM_RESCHEDULE_REM if all iterations succeeded, otherwise the
 *          first non-VINF_SUCCESS status from emR3HmStep.
 * @param   pVM          Pointer to the VM.
 * @param   pVCpu        Pointer to the VMCPU.
 * @param   cIterations  Maximum number of instructions to step.
 */
static int emR3SingleStepExecHm(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
{
    int rc = VINF_SUCCESS;
    EMSTATE enmOldState = pVCpu->em.s.enmState;
    pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;

    Log(("Single step BEGIN:\n"));
    for (uint32_t i = 0; i < cIterations; i++)
    {
        DBGFR3PrgStep(pVCpu);
        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
        rc = emR3HmStep(pVM, pVCpu);
        /* Stop on error or if the guest state is no longer HM-executable. */
        if (   rc != VINF_SUCCESS
            || !HMR3CanExecuteGuest(pVM, pVCpu->em.s.pCtx))
            break;
    }
    Log(("Single step END: rc=%Rrc\n", rc));
    /* Defensive: make sure TF is not left set in the guest EFLAGS. */
    CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
    pVCpu->em.s.enmState = enmOldState;
    return rc == VINF_SUCCESS ? VINF_EM_RESCHEDULE_REM : rc;
}

#endif /* DEBUG && SOME_UNUSED_FUNCTIONS */
154
155
/**
 * Executes instruction in HM mode if we can.
 *
 * This is somewhat comparable to REMR3EmulateInstruction.
 *
 * @returns VBox strict status code.
 * @retval  VINF_EM_DBG_STEPPED on success.
 * @retval  VERR_EM_CANNOT_EXEC_GUEST if we cannot execute guest instructions in
 *          HM right now.
 *
 * @param   pVM     Pointer to the cross context VM structure.
 * @param   pVCpu   Pointer to the cross context CPU structure for
 *                  the calling EMT.
 * @param   fFlags  Combinations of EM_ONE_INS_FLAGS_XXX.
 * @thread  EMT.
 */
VMMR3_INT_DECL(VBOXSTRICTRC) EMR3HmSingleInstruction(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
{
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
    Assert(!(fFlags & ~EM_ONE_INS_FLAGS_MASK));

    /* Bail out if HM cannot run the guest in its current state. */
    if (!HMR3CanExecuteGuest(pVM, pCtx))
        return VINF_EM_RESCHEDULE;

    /* Remember the starting RIP so we can tell whether an instruction
       actually executed (used by the EM_ONE_INS_FLAGS_RIP_CHANGE mode). */
    uint64_t const uOldRip = pCtx->rip;
    for (;;)
    {
        /*
         * Service necessary FFs before going into HM.
         */
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
        {
            VBOXSTRICTRC rcStrict = emR3HmForcedActions(pVM, pVCpu, pCtx);
            if (rcStrict != VINF_SUCCESS)
            {
                Log(("EMR3HmSingleInstruction: FFs before -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
                return rcStrict;
            }
        }

        /*
         * Go execute it, temporarily enabling single-instruction mode in HM
         * and restoring the previous setting afterwards.
         */
        bool fOld = HMSetSingleInstruction(pVCpu, true);
        VBOXSTRICTRC rcStrict = VMMR3HmRunGC(pVM, pVCpu);
        HMSetSingleInstruction(pVCpu, fOld);
        LogFlow(("EMR3HmSingleInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));

        /*
         * Handle high priority FFs and informational status codes.  We don't do
         * normal FF processing; the caller or the next call can deal with them.
         */
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
        {
            rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
            LogFlow(("EMR3HmSingleInstruction: FFs after -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
        }

        /* Let the common HM RC handler process anything outside the EM status range. */
        if (rcStrict != VINF_SUCCESS && (rcStrict < VINF_EM_FIRST || rcStrict > VINF_EM_LAST))
        {
            rcStrict = emR3HmHandleRC(pVM, pVCpu, pCtx, VBOXSTRICTRC_TODO(rcStrict));
            Log(("EMR3HmSingleInstruction: emR3HmHandleRC -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
        }

        /*
         * Done?  We loop only when EM_ONE_INS_FLAGS_RIP_CHANGE is given, the
         * status is OK/STEPPED, and RIP has not moved yet.
         */
        if (   (rcStrict != VINF_SUCCESS && rcStrict != VINF_EM_DBG_STEPPED)
            || !(fFlags & EM_ONE_INS_FLAGS_RIP_CHANGE)
            || pCtx->rip != uOldRip)
        {
            if (rcStrict == VINF_SUCCESS && pCtx->rip != uOldRip)
                rcStrict = VINF_EM_DBG_STEPPED;
            Log(("EMR3HmSingleInstruction: returns %Rrc (rip %llx -> %llx)\n", VBOXSTRICTRC_VAL(rcStrict), uOldRip, pCtx->rip));
            return rcStrict;
        }
    }
}
237
238
/**
 * Executes one (or perhaps a few more) instruction(s).
 *
 * Hands the current instruction over to the recompiler (REM) or, in builds
 * without REM, to the IEM interpreter.
 *
 * @returns VBox status code suitable for EM.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   rcRC        Return code from RC.  Currently unused (NOREF'ed).
 * @param   pszPrefix   Disassembly prefix.  If not NULL we'll disassemble the
 *                      instruction and prefix the log output with this text.
 */
#ifdef LOG_ENABLED
static int emR3ExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC, const char *pszPrefix)
#else
static int emR3ExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC)
#endif
{
#ifdef LOG_ENABLED
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
#endif
    int rc;
    NOREF(rcRC);

    /*
     * The simple solution is to use the recompiler.
     * The better solution is to disassemble the current instruction and
     * try handle as many as possible without using REM.
     */

#ifdef LOG_ENABLED
    /*
     * Disassemble the instruction if requested.
     */
    if (pszPrefix)
    {
        DBGFR3_INFO_LOG(pVM, "cpumguest", pszPrefix);
        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, pszPrefix);
    }
#endif /* LOG_ENABLED */

#if 0
    /* Try our own instruction emulator before falling back to the recompiler. */
    DISCPUSTATE Cpu;
    rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "GEN EMU");
    if (RT_SUCCESS(rc))
    {
        switch (Cpu.pCurInstr->uOpcode)
        {
            /* @todo we can do more now */
            case OP_MOV:
            case OP_AND:
            case OP_OR:
            case OP_XOR:
            case OP_POP:
            case OP_INC:
            case OP_DEC:
            case OP_XCHG:
                STAM_PROFILE_START(&pVCpu->em.s.StatMiscEmu, a);
                rc = EMInterpretInstructionCpuUpdtPC(pVM, pVCpu, &Cpu, CPUMCTX2CORE(pCtx), 0);
                if (RT_SUCCESS(rc))
                {
#ifdef EM_NOTIFY_HM
                    if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
                        HMR3NotifyEmulated(pVCpu);
#endif
                    STAM_PROFILE_STOP(&pVCpu->em.s.StatMiscEmu, a);
                    return rc;
                }
                if (rc != VERR_EM_INTERPRETER)
                    AssertMsgFailedReturn(("rc=%Rrc\n", rc), rc);
                STAM_PROFILE_STOP(&pVCpu->em.s.StatMiscEmu, a);
                break;
        }
    }
#endif /* 0 */
    STAM_PROFILE_START(&pVCpu->em.s.StatREMEmu, a);
    Log(("EMINS: %04x:%RGv RSP=%RGv\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip, (RTGCPTR)pCtx->rsp));
#ifdef VBOX_WITH_REM
    EMRemLock(pVM);
    /* Flush the recompiler TLB if the VCPU has changed. */
    if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    pVM->em.s.idLastRemCpu = pVCpu->idCpu;

    rc = REMR3EmulateInstruction(pVM, pVCpu);
    EMRemUnlock(pVM);
#else
    rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
#endif
    STAM_PROFILE_STOP(&pVCpu->em.s.StatREMEmu, a);

#ifdef EM_NOTIFY_HM
    /* Tell HM we emulated an instruction on its behalf (disabled by default). */
    if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
        HMR3NotifyEmulated(pVCpu);
#endif
    return rc;
}
338
339
340/**
341 * Executes one (or perhaps a few more) instruction(s).
342 * This is just a wrapper for discarding pszPrefix in non-logging builds.
343 *
344 * @returns VBox status code suitable for EM.
345 * @param pVM Pointer to the VM.
346 * @param pVCpu Pointer to the VMCPU.
347 * @param pszPrefix Disassembly prefix. If not NULL we'll disassemble the
348 * instruction and prefix the log output with this text.
349 * @param rcGC GC return code
350 */
351DECLINLINE(int) emR3ExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC)
352{
353#ifdef LOG_ENABLED
354 return emR3ExecuteInstructionWorker(pVM, pVCpu, rcGC, pszPrefix);
355#else
356 return emR3ExecuteInstructionWorker(pVM, pVCpu, rcGC);
357#endif
358}
359
/**
 * Executes one (or perhaps a few more) I/O instruction(s).
 *
 * First tries to restart an I/O instruction that ring-0 refused, then hands
 * the instruction to IEM (or, in older builds, to the IOM interpreters with a
 * REM fallback).
 *
 * @returns VBox status code suitable for EM.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 */
static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
{
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;

    STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);

    /* Try to restart the io instruction that was refused in ring-0. */
    VBOXSTRICTRC rcStrict = HMR3RestartPendingIOInstr(pVM, pVCpu, pCtx);
    if (IOM_SUCCESS(rcStrict))
    {
        STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoRestarted);
        STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
        return VBOXSTRICTRC_TODO(rcStrict);     /* rip already updated. */
    }
    /* VERR_NOT_FOUND simply means there was no pending I/O instruction to
       restart; anything else is unexpected here. */
    AssertMsgReturn(rcStrict == VERR_NOT_FOUND, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)),
                    RT_SUCCESS_NP(rcStrict) ? VERR_IPE_UNEXPECTED_INFO_STATUS : VBOXSTRICTRC_TODO(rcStrict));

#ifdef VBOX_WITH_FIRST_IEM_STEP
    /* Hand it over to the interpreter. */
    rcStrict = IEMExecOne(pVCpu);
    LogFlow(("emR3ExecuteIOInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoIem);
    STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
    return VBOXSTRICTRC_TODO(rcStrict);

#else
    /** @todo probably we should fall back to the recompiler; otherwise we'll go back and forth between HC & GC
     *        as io instructions tend to come in packages of more than one
     */
    DISCPUSTATE Cpu;
    int rc2 = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "IO EMU");
    if (RT_SUCCESS(rc2))
    {
        rcStrict = VINF_EM_RAW_EMULATE_INSTR;

        if (!(Cpu.fPrefix & (DISPREFIX_REP | DISPREFIX_REPNE)))
        {
            switch (Cpu.pCurInstr->uOpcode)
            {
                case OP_IN:
                {
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
                    rcStrict = IOMInterpretIN(pVM, pVCpu, CPUMCTX2CORE(pCtx), &Cpu);
                    break;
                }

                case OP_OUT:
                {
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
                    rcStrict = IOMInterpretOUT(pVM, pVCpu, CPUMCTX2CORE(pCtx), &Cpu);
                    break;
                }
            }
        }
        else if (Cpu.fPrefix & DISPREFIX_REP)
        {
            switch (Cpu.pCurInstr->uOpcode)
            {
                case OP_INSB:
                case OP_INSWD:
                {
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
                    rcStrict = IOMInterpretINS(pVM, pVCpu, CPUMCTX2CORE(pCtx), &Cpu);
                    break;
                }

                case OP_OUTSB:
                case OP_OUTSWD:
                {
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
                    rcStrict = IOMInterpretOUTS(pVM, pVCpu, CPUMCTX2CORE(pCtx), &Cpu);
                    break;
                }
            }
        }

        /*
         * Handle the I/O return codes.
         * (The unhandled cases end up with rcStrict == VINF_EM_RAW_EMULATE_INSTR.)
         */
        if (IOM_SUCCESS(rcStrict))
        {
            /* The IOM interpreters do not advance RIP; do it here. */
            pCtx->rip += Cpu.cbInstr;
            STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
            LogFlow(("emR3ExecuteIOInstruction: %Rrc 1\n", VBOXSTRICTRC_VAL(rcStrict)));
            return VBOXSTRICTRC_TODO(rcStrict);
        }

        if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
        {
            /* The active trap will be dispatched. */
            Assert(TRPMHasTrap(pVCpu));
            STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
            LogFlow(("emR3ExecuteIOInstruction: VINF_SUCCESS 2\n"));
            return VINF_SUCCESS;
        }
        AssertMsg(rcStrict != VINF_TRPM_XCPT_DISPATCHED, ("Handle VINF_TRPM_XCPT_DISPATCHED\n"));

        if (RT_FAILURE(rcStrict))
        {
            STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
            LogFlow(("emR3ExecuteIOInstruction: %Rrc 3\n", VBOXSTRICTRC_VAL(rcStrict)));
            return VBOXSTRICTRC_TODO(rcStrict);
        }
        AssertMsg(rcStrict == VINF_EM_RAW_EMULATE_INSTR || rcStrict == VINF_EM_RESCHEDULE_REM, ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    }

    /* Fall back to full instruction emulation. */
    STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
    int rc3 = emR3ExecuteInstruction(pVM, pVCpu, "IO: ");
    LogFlow(("emR3ExecuteIOInstruction: %Rrc 4 (rc2=%Rrc, rc3=%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict), rc2, rc3));
    return rc3;
#endif
}
480
481
/**
 * Process HM specific forced actions.
 *
 * This function is called when any FFs in VM_FF_HIGH_PRIORITY_PRE_RAW_MASK or
 * VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK are pending.
 *
 * @returns VBox status code. May return VINF_EM_NO_MEMORY but none of the other
 *          EM statuses.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   pCtx    Pointer to the guest CPU context.
 */
static int emR3HmForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    /*
     * Sync page directory.
     */
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
    {
        Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
        int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
        if (RT_FAILURE(rc))
            return rc;

#ifdef VBOX_WITH_RAW_MODE
        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
#endif

        /* Prefetch pages for EIP and ESP. */
        /** @todo This is rather expensive. Should investigate if it really helps at all. */
        rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pCtx->rip));
        if (rc == VINF_SUCCESS)
            rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DISSELREG_SS, CPUMCTX2CORE(pCtx), pCtx->rsp));
        if (rc != VINF_SUCCESS)
        {
            if (rc != VINF_PGM_SYNC_CR3)
            {
                /* Only genuine failures may pass through here; any other
                   informational status is unexpected. */
                AssertLogRelMsgReturn(RT_FAILURE(rc), ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
                return rc;
            }
            /* The prefetch requested another CR3 sync; perform it now. */
            rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
            if (RT_FAILURE(rc))
                return rc;
        }
        /** @todo maybe prefetch the supervisor stack page as well */
#ifdef VBOX_WITH_RAW_MODE
        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
#endif
    }

    /*
     * Allocate handy pages (just in case the above actions have consumed some pages).
     */
    if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
    {
        int rc = PGMR3PhysAllocateHandyPages(pVM);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Check whether we're out of memory now.
     *
     * This may stem from some of the above actions or operations that has been executed
     * since we ran FFs.  The allocate handy pages must for instance always be followed by
     * this check.
     */
    if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
        return VINF_EM_NO_MEMORY;

    return VINF_SUCCESS;
}
553
554
/**
 * Executes hardware accelerated guest code. (Intel VT-x &amp; AMD-V)
 *
 * This function contains the HM version of the inner
 * execution loop (the outer loop being in EMR3ExecuteVM()).
 *
 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE, VINF_EM_RESCHEDULE_RAW,
 *          VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pfFFDone    Where to store an indicator telling whether or not
 *                      FFs were done before returning.
 */
int emR3HmExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
{
    int rc = VERR_IPE_UNINITIALIZED_STATUS;
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;

    LogFlow(("emR3HmExecute%d: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
    *pfFFDone = false;

    STAM_COUNTER_INC(&pVCpu->em.s.StatHmExecuteEntry);

#ifdef EM_NOTIFY_HM
    HMR3NotifyScheduled(pVCpu);
#endif

    /*
     * Spin till we get a forced action which returns anything but VINF_SUCCESS.
     */
    for (;;)
    {
        STAM_PROFILE_ADV_START(&pVCpu->em.s.StatHmEntry, a);

        /* Check if a forced reschedule is pending. */
        if (HMR3IsRescheduleRequired(pVM, pCtx))
        {
            rc = VINF_EM_RESCHEDULE;
            break;
        }

        /*
         * Process high priority pre-execution raw-mode FFs.
         */
#ifdef VBOX_WITH_RAW_MODE
        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
#endif
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
        {
            rc = emR3HmForcedActions(pVM, pVCpu, pCtx);
            if (rc != VINF_SUCCESS)
                break;
        }

#ifdef LOG_ENABLED
        /*
         * Log important stuff before entering GC.
         */
        if (TRPMHasTrap(pVCpu))
            Log(("CPU%d: Pending hardware interrupt=0x%x cs:rip=%04X:%RGv\n", pVCpu->idCpu, TRPMGetTrapNo(pVCpu), pCtx->cs.Sel, (RTGCPTR)pCtx->rip));

        uint32_t cpl = CPUMGetGuestCPL(pVCpu);

        if (pVM->cCpus == 1)
        {
            if (pCtx->eflags.Bits.u1VM)
                Log(("HWV86: %08X IF=%d\n", pCtx->eip, pCtx->eflags.Bits.u1IF));
            else if (CPUMIsGuestIn64BitCodeEx(pCtx))
                Log(("HWR%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
            else
                Log(("HWR%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
        }
        else
        {
            if (pCtx->eflags.Bits.u1VM)
                Log(("HWV86-CPU%d: %08X IF=%d\n", pVCpu->idCpu, pCtx->eip, pCtx->eflags.Bits.u1IF));
            else if (CPUMIsGuestIn64BitCodeEx(pCtx))
                Log(("HWR%d-CPU%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
            else
                Log(("HWR%d-CPU%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
        }
#endif /* LOG_ENABLED */

        /*
         * Execute the code.
         */
        STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatHmEntry, a);

        if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
        {
            STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x);
            rc = VMMR3HmRunGC(pVM, pVCpu);
            STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x);
        }
        else
        {
            /* Give up this time slice; virtual time continues */
            STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
            RTThreadSleep(5);
            STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
            rc = VINF_SUCCESS;
        }


        /*
         * Deal with high priority post execution FFs before doing anything else.
         */
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
            rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);

        /*
         * Process the returned status code.
         * EM-range statuses are handed straight back to the outer loop.
         */
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
            break;

        rc = emR3HmHandleRC(pVM, pVCpu, pCtx, rc);
        if (rc != VINF_SUCCESS)
            break;

        /*
         * Check and execute forced actions.
         */
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        TMTimerPollVoid(pVM, pVCpu);
#endif
        if (   VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_MASK))
        {
            rc = emR3ForcedActions(pVM, pVCpu, rc);
            VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
            /* Keep looping only if the FFs succeeded or asked for an HM reschedule. */
            if (   rc != VINF_SUCCESS
                && rc != VINF_EM_RESCHEDULE_HM)
            {
                *pfFFDone = true;
                break;
            }
        }
    }

    /*
     * Return to outer loop.
     */
#if defined(LOG_ENABLED) && defined(DEBUG)
    RTLogFlush(NULL);
#endif
    return rc;
}
707
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette