VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@ 80281

Last change on this file since 80281 was 80281, checked in by vboxsync, 6 years ago

VMM,++: Refactoring code to use VMMC & VMMCPUCC. bugref:9217

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 121.6 KB
 
1/* $Id: EM.cpp 80281 2019-08-15 07:29:37Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_em EM - The Execution Monitor / Manager
19 *
20 * The Execution Monitor/Manager is responsible for running the VM, scheduling
21 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
22 * Interpreted), and keeping the CPU states in sync. The function
23 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
24 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
25 * emR3RemExecute).
26 *
27 * The interpreted execution is only used to avoid switching between
28 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
29 * The interpretation is thus implemented as part of EM.
30 *
31 * @see grp_em
32 */
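/*
 * Illustrative sketch (simplified, not verbatim from this file): the dispatch
 * shape the text above describes.  EMR3ExecuteVM() itself lives further down
 * in EM.cpp; the helper names follow the ones referenced in this file, but the
 * control flow and error handling are condensed here.
 *
 *     for (;;)
 *     {
 *         switch (pVCpu->em.s.enmState)
 *         {
 *             case EMSTATE_HM:           rc = emR3HmExecute(pVM, pVCpu, &fFFDone); break;
 *             case EMSTATE_REM:          rc = emR3RemExecute(pVM, pVCpu, &fFFDone); break;
 *             case EMSTATE_IEM_THEN_REM: rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone)); break;
 *             // ... HALTED, WAIT_SIPI, DEBUG_* and guru states handled likewise ...
 *         }
 *         if (!fFFDone)
 *             rc = emR3ForcedActions(pVM, pVCpu, rc);   // process pending forced actions between bursts
 *     }
 */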
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define VBOX_BUGREF_9217_PART_I
39#define LOG_GROUP LOG_GROUP_EM
40#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET */
41#include <VBox/vmm/em.h>
42#include <VBox/vmm/vmm.h>
43#include <VBox/vmm/selm.h>
44#include <VBox/vmm/trpm.h>
45#include <VBox/vmm/iem.h>
46#include <VBox/vmm/nem.h>
47#include <VBox/vmm/iom.h>
48#include <VBox/vmm/dbgf.h>
49#include <VBox/vmm/pgm.h>
50#ifdef VBOX_WITH_REM
51# include <VBox/vmm/rem.h>
52#endif
53#include <VBox/vmm/apic.h>
54#include <VBox/vmm/tm.h>
55#include <VBox/vmm/mm.h>
56#include <VBox/vmm/ssm.h>
57#include <VBox/vmm/pdmapi.h>
58#include <VBox/vmm/pdmcritsect.h>
59#include <VBox/vmm/pdmqueue.h>
60#include <VBox/vmm/hm.h>
61#include "EMInternal.h"
62#include <VBox/vmm/vm.h>
63#include <VBox/vmm/uvm.h>
64#include <VBox/vmm/cpumdis.h>
65#include <VBox/dis.h>
66#include <VBox/disopcode.h>
67#include <VBox/err.h>
68#include "VMMTracing.h"
69
70#include <iprt/asm.h>
71#include <iprt/string.h>
72#include <iprt/stream.h>
73#include <iprt/thread.h>
74
75
76/*********************************************************************************************************************************
77* Internal Functions *
78*********************************************************************************************************************************/
79static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
80static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
81#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
82static const char *emR3GetStateName(EMSTATE enmState);
83#endif
84static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
85#if defined(VBOX_WITH_REM) || defined(DEBUG)
86static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
87#endif
88static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
89
90
91/**
92 * Initializes the EM.
93 *
94 * @returns VBox status code.
95 * @param pVM The cross context VM structure.
96 */
97VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
98{
99 LogFlow(("EMR3Init\n"));
100 /*
101 * Assert alignment and sizes.
102 */
103 AssertCompileMemberAlignment(VM, em.s, 32);
104 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
105 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s.u.FatalLongJump) <= RT_SIZEOFMEMB(VMCPU, em.s.u.achPaddingFatalLongJump));
106 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s) <= RT_SIZEOFMEMB(VMCPU, em.padding));
107
108 /*
109 * Init the structure.
110 */
111 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
112 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
113
114 int rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
115 AssertLogRelRCReturn(rc, rc);
116
117 bool fEnabled;
118 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
119 AssertLogRelRCReturn(rc, rc);
120 pVM->em.s.fGuruOnTripleFault = !fEnabled;
121 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
122 {
123 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
124 pVM->em.s.fGuruOnTripleFault = true;
125 }
126
127 LogRel(("EMR3Init: fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n", pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
128
129 /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
130 * Whether to try to correlate exit history in any context, detect hot spots and
131 * try to optimize these using IEM if there are other exits close by. This
132 * overrides the context specific settings. */
133 bool fExitOptimizationEnabled = true;
134 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
135 AssertLogRelRCReturn(rc, rc);
136
137 /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
138 * Whether to optimize exits in ring-0. Setting this to false will also disable
139 * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting. Depending on preemption
140 * capabilities of the host kernel, this optimization may be unavailable. */
141 bool fExitOptimizationEnabledR0 = true;
142 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
143 AssertLogRelRCReturn(rc, rc);
144 fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
145
146 /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
147 * Whether to optimize exits in ring-0 when preemption is disabled (or preemption
148 * hooks are in effect). */
149 /** @todo change the default to true here */
150 bool fExitOptimizationEnabledR0PreemptDisabled = true;
151 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
152 AssertLogRelRCReturn(rc, rc);
153 fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
154
155 /** @cfgm{/EM/HistoryExecMaxInstructions, integer, 16, 65535, 8192}
156 * Maximum number of instructions to let EMHistoryExec execute in one go. */
157 uint16_t cHistoryExecMaxInstructions = 8192;
158 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryExecMaxInstructions", &cHistoryExecMaxInstructions, cHistoryExecMaxInstructions);
159 AssertLogRelRCReturn(rc, rc);
160 if (cHistoryExecMaxInstructions < 16)
161 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS, "/EM/HistoryExecMaxInstructions value is too small, min 16");
162
163 /** @cfgm{/EM/HistoryProbeMaxInstructionsWithoutExit, integer, 2, 65535, 24 for HM, 32 for NEM}
164 * Maximum number of instructions between exits during probing. */
165 uint16_t cHistoryProbeMaxInstructionsWithoutExit = 24;
166#ifdef RT_OS_WINDOWS
167 if (VM_IS_NEM_ENABLED(pVM))
168 cHistoryProbeMaxInstructionsWithoutExit = 32;
169#endif
170 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbeMaxInstructionsWithoutExit", &cHistoryProbeMaxInstructionsWithoutExit,
171 cHistoryProbeMaxInstructionsWithoutExit);
172 AssertLogRelRCReturn(rc, rc);
173 if (cHistoryProbeMaxInstructionsWithoutExit < 2)
174 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
175 "/EM/HistoryProbeMaxInstructionsWithoutExit value is too small, min 2");
176
177 /** @cfgm{/EM/HistoryProbMinInstructions, integer, 0, 65535, depends}
178 * The default is (/EM/HistoryProbeMaxInstructionsWithoutExit + 1) * 3. */
179 uint16_t cHistoryProbeMinInstructions = cHistoryProbeMaxInstructionsWithoutExit < 0x5554
180 ? (cHistoryProbeMaxInstructionsWithoutExit + 1) * 3 : 0xffff;
181 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbMinInstructions", &cHistoryProbeMinInstructions,
182 cHistoryProbeMinInstructions);
183 AssertLogRelRCReturn(rc, rc);
184
185 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
186 {
187 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
188 pVCpu->em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
189 pVCpu->em.s.fExitOptimizationEnabledR0 = fExitOptimizationEnabledR0;
190 pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
191 pVCpu->em.s.cHistoryExecMaxInstructions = cHistoryExecMaxInstructions;
192 pVCpu->em.s.cHistoryProbeMinInstructions = cHistoryProbeMinInstructions;
193 pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
194 }
195
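/*
 * Note: the /EM/* keys queried above live in the VM's CFGM tree.  For
 * experiments they can be injected from the host side as extradata (assuming
 * the usual VBoxInternal mapping onto the CFGM root), e.g.:
 *
 *     VBoxManage setextradata "MyVM" "VBoxInternal/EM/TripleFaultReset" 1
 *     VBoxManage setextradata "MyVM" "VBoxInternal/EM/IemExecutesAll"   1
 */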
196#ifdef VBOX_WITH_REM
197 /*
198 * Initialize the REM critical section.
199 */
200 AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
201 rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
202 AssertRCReturn(rc, rc);
203#endif
204
205 /*
206 * Saved state.
207 */
208 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
209 NULL, NULL, NULL,
210 NULL, emR3Save, NULL,
211 NULL, emR3Load, NULL);
212 if (RT_FAILURE(rc))
213 return rc;
214
215 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
216 {
217 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
218
219 pVCpu->em.s.enmState = idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
220 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
221 pVCpu->em.s.u64TimeSliceStart = 0; /* paranoia */
222 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
223
224# define EM_REG_COUNTER(a, b, c) \
225 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, idCpu); \
226 AssertRC(rc);
227
228# define EM_REG_COUNTER_USED(a, b, c) \
229 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, idCpu); \
230 AssertRC(rc);
231
232# define EM_REG_PROFILE(a, b, c) \
233 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
234 AssertRC(rc);
235
236# define EM_REG_PROFILE_ADV(a, b, c) \
237 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
238 AssertRC(rc);
239
240 /*
241 * Statistics.
242 */
243#ifdef VBOX_WITH_STATISTICS
244 PEMSTATS pStats;
245 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
246 if (RT_FAILURE(rc))
247 return rc;
248
249 pVCpu->em.s.pStatsR3 = pStats;
250 pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
251
252# if 1 /* rawmode only? */
253 EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%u/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
254 EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%u/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
255 EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%u/R3/PrivInst/Cli", "Number of cli instructions.");
256 EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%u/R3/PrivInst/Sti", "Number of sti instructions.");
257 EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%u/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
258 EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%u/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
259 EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%u/R3/PrivInst/Misc", "Number of misc. instructions.");
260 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%u/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
261 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%u/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
262 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%u/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
263 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%u/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
264 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%u/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
265 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%u/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
266 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%u/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
267 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%u/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
268 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%u/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
269 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%u/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
270 EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%u/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
271 EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%u/R3/PrivInst/Iret", "Number of iret instructions.");
272 EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%u/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
273 EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%u/R3/PrivInst/Lidt", "Number of lidt instructions.");
274 EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%u/R3/PrivInst/Lldt", "Number of lldt instructions.");
275 EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%u/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
276 EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%u/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
277 EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%u/R3/PrivInst/Syscall", "Number of syscall instructions.");
278 EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%u/R3/PrivInst/Sysret", "Number of sysret instructions.");
279 EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%u/Cli/Total", "Total number of cli instructions executed.");
280#endif
281 pVCpu->em.s.pCliStatTree = 0;
282
283 /* these should be considered for release statistics. */
284 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%u/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
285 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%u/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
286 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%u/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
287 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%u/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
288 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%u/EM/HMExecuteCalled", "Number of times emR3HmExecute is called.");
289 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%u/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
290 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%u/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
291 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%u/EM/NEMEnter", "Profiling NEM entry overhead.");
292#endif /* VBOX_WITH_STATISTICS */
293 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%u/EM/NEMExec", "Profiling NEM execution.");
294 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%u/EM/NEMExecuteCalled", "Number of times emR3NemExecute is called.");
295#ifdef VBOX_WITH_STATISTICS
296 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%u/EM/REMEmuSingle", "Profiling single instruction REM execution.");
297 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%u/EM/REMExec", "Profiling REM execution.");
298 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%u/EM/REMSync", "Profiling REM context syncing.");
299 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%u/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
300 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%u/EM/RAWExec", "Profiling Raw Mode execution.");
301 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%u/EM/RAWTail", "Profiling Raw Mode tail overhead.");
302#endif /* VBOX_WITH_STATISTICS */
303
304 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%u/EM/ForcedActions", "Profiling forced action execution.");
305 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%u/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
306 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%u/EM/Capped", "Profiling capped state (sleep).");
307 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%u/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
308 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%u/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
309
310 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%u/EM/Total", "Profiling EMR3ExecuteVM.");
311
312 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
313 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", idCpu);
314 AssertRC(rc);
315
316 /* History record statistics */
317 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
318 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", idCpu);
319 AssertRC(rc);
320
321 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
322 {
323 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
324 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", idCpu, iStep);
325 AssertRC(rc);
326 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
327 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", idCpu, iStep);
328 AssertRC(rc);
329 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecReplaced[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
330 "Number of replacements at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacments", idCpu, iStep);
331 AssertRC(rc);
332 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
333 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", idCpu, iStep);
334 AssertRC(rc);
335 }
336
337 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%u/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
338 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%u/ExitOpt/ExecSavedExit", "Net number of saved exits.");
339 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%u/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
340 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%u/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
341 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%u/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
342 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%u/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
343 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%u/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
344 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%u/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
345 }
346
347 emR3InitDbg(pVM);
348 return VINF_SUCCESS;
349}
350
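/*
 * The counters and profiles registered above can be inspected on a running VM,
 * e.g. via the debugvm statistics command (option names as in recent VBoxManage
 * builds; check --help):
 *
 *     VBoxManage debugvm "MyVM" statistics --pattern "/EM/CPU0/ExitOpt/*"
 *     VBoxManage debugvm "MyVM" statistics --pattern "/PROF/CPU0/EM/*"
 */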
351
352/**
353 * Called when a VM initialization stage is completed.
354 *
355 * @returns VBox status code.
356 * @param pVM The cross context VM structure.
357 * @param enmWhat The initialization state that was completed.
358 */
359VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
360{
361 if (enmWhat == VMINITCOMPLETED_RING0)
362 LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
363 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabled, pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0,
364 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0PreemptDisabled));
365 return VINF_SUCCESS;
366}
367
368
369/**
370 * Applies relocations to data and code managed by this
371 * component. This function will be called at init and
372 * whenever the VMM needs to relocate itself inside the GC.
373 *
374 * @param pVM The cross context VM structure.
375 */
376VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
377{
378 LogFlow(("EMR3Relocate\n"));
379 RT_NOREF(pVM);
380}
381
382
383/**
384 * Reset the EM state for a CPU.
385 *
386 * Called by EMR3Reset and hot plugging.
387 *
388 * @param pVCpu The cross context virtual CPU structure.
389 */
390VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
391{
392 /* Reset scheduling state. */
393 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
394
395 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
396 out of the HALTED state here so that enmPrevState doesn't end up as
397 HALTED when EMR3Execute returns. */
398 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
399 {
400 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
401 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
402 }
403}
404
405
406/**
407 * Reset notification.
408 *
409 * @param pVM The cross context VM structure.
410 */
411VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
412{
413 Log(("EMR3Reset: \n"));
414 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
415 EMR3ResetCpu(pVM->apCpusR3[idCpu]);
416}
417
418
419/**
420 * Terminates the EM.
421 *
422 * Termination means cleaning up and freeing all resources;
423 * the VM itself is at this point powered off or suspended.
424 *
425 * @returns VBox status code.
426 * @param pVM The cross context VM structure.
427 */
428VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
429{
430#ifdef VBOX_WITH_REM
431 PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
432#else
433 RT_NOREF(pVM);
434#endif
435 return VINF_SUCCESS;
436}
437
438
439/**
440 * Execute state save operation.
441 *
442 * @returns VBox status code.
443 * @param pVM The cross context VM structure.
444 * @param pSSM SSM operation handle.
445 */
446static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
447{
448 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
449 {
450 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
451
452 SSMR3PutBool(pSSM, false /*fForceRAW*/);
453
454 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
455 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
456 SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
457
458 /* Save mwait state. */
459 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
460 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
461 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
462 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
463 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
464 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
465 AssertRCReturn(rc, rc);
466 }
467 return VINF_SUCCESS;
468}
469
470
471/**
472 * Execute state load operation.
473 *
474 * @returns VBox status code.
475 * @param pVM The cross context VM structure.
476 * @param pSSM SSM operation handle.
477 * @param uVersion Data layout version.
478 * @param uPass The data pass.
479 */
480static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
481{
482 /*
483 * Validate version.
484 */
485 if ( uVersion > EM_SAVED_STATE_VERSION
486 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
487 {
488 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
489 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
490 }
491 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
492
493 /*
494 * Load the saved state.
495 */
496 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
497 {
498 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
499
500 bool fForceRAWIgnored;
501 int rc = SSMR3GetBool(pSSM, &fForceRAWIgnored);
502 AssertRCReturn(rc, rc);
503
504 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
505 {
506 AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
507 rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
508 AssertRCReturn(rc, rc);
509 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
510
511 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
512 }
513 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
514 {
515 /* Load mwait state. */
516 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
517 AssertRCReturn(rc, rc);
518 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
519 AssertRCReturn(rc, rc);
520 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
521 AssertRCReturn(rc, rc);
522 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
523 AssertRCReturn(rc, rc);
524 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
525 AssertRCReturn(rc, rc);
526 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
527 AssertRCReturn(rc, rc);
528 }
529
530 Assert(!pVCpu->em.s.pCliStatTree);
531 }
532 return VINF_SUCCESS;
533}
534
535
536/**
537 * Argument packet for emR3SetExecutionPolicy.
538 */
539struct EMR3SETEXECPOLICYARGS
540{
541 EMEXECPOLICY enmPolicy;
542 bool fEnforce;
543};
544
545
546/**
547 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
548 */
549static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
550{
551 /*
552 * Only the first CPU changes the variables.
553 */
554 if (pVCpu->idCpu == 0)
555 {
556 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
557 switch (pArgs->enmPolicy)
558 {
559 case EMEXECPOLICY_RECOMPILE_RING0:
560 case EMEXECPOLICY_RECOMPILE_RING3:
561 break;
562 case EMEXECPOLICY_IEM_ALL:
563 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
564 break;
565 default:
566 AssertFailedReturn(VERR_INVALID_PARAMETER);
567 }
568 Log(("EM: Set execution policy (fIemExecutesAll=%RTbool)\n", pVM->em.s.fIemExecutesAll));
569 }
570
571 /*
572 * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
573 */
574 return pVCpu->em.s.enmState == EMSTATE_RAW
575 || pVCpu->em.s.enmState == EMSTATE_HM
576 || pVCpu->em.s.enmState == EMSTATE_NEM
577 || pVCpu->em.s.enmState == EMSTATE_IEM
578 || pVCpu->em.s.enmState == EMSTATE_REM
579 || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
580 ? VINF_EM_RESCHEDULE
581 : VINF_SUCCESS;
582}
583
584
585/**
586 * Changes an execution scheduling policy parameter.
587 *
588 * This is used to enable or disable raw-mode / hardware-virtualization
589 * execution of user and supervisor code.
590 *
591 * @returns VINF_SUCCESS on success.
592 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
593 * @returns VERR_INVALID_PARAMETER on an invalid enmMode value.
594 *
595 * @param pUVM The user mode VM handle.
596 * @param enmPolicy The scheduling policy to change.
597 * @param fEnforce Whether to enforce the policy or not.
598 */
599VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
600{
601 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
602 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
603 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
604
605 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
606 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
607}
608
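/*
 * Caller-side usage sketch (not from this file): forcing all guest code through
 * IEM on a live VM and later restoring normal scheduling:
 *
 *     int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
 *     ...
 *     rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, false);
 *
 * The descending rendezvous above lets EMT(0) flip fIemExecutesAll and makes
 * every EMT re-evaluate its state, so CPUs currently in RAW/HM/NEM/IEM/REM
 * return VINF_EM_RESCHEDULE and pick the new policy up.
 */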
609
610/**
611 * Queries an execution scheduling policy parameter.
612 *
613 * @returns VBox status code
614 * @param pUVM The user mode VM handle.
615 * @param enmPolicy The scheduling policy to query.
616 * @param pfEnforced Where to return the current value.
617 */
618VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
619{
620 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
621 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
622 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
623 PVM pVM = pUVM->pVM;
624 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
625
626 /* No need to bother EMTs with a query. */
627 switch (enmPolicy)
628 {
629 case EMEXECPOLICY_RECOMPILE_RING0:
630 case EMEXECPOLICY_RECOMPILE_RING3:
631 *pfEnforced = false;
632 break;
633 case EMEXECPOLICY_IEM_ALL:
634 *pfEnforced = pVM->em.s.fIemExecutesAll;
635 break;
636 default:
637 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
638 }
639
640 return VINF_SUCCESS;
641}
642
643
644/**
645 * Queries the main execution engine of the VM.
646 *
647 * @returns VBox status code
648 * @param pUVM The user mode VM handle.
649 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
650 */
651VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
652{
653 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
654 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
655
656 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
657 PVM pVM = pUVM->pVM;
658 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
659
660 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
661 return VINF_SUCCESS;
662}
663
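/*
 * Caller-side usage sketch (assuming the VM_EXEC_ENGINE_XXX values from vm.h):
 *
 *     uint8_t bEngine = VM_EXEC_ENGINE_NOT_SET;
 *     int rc = EMR3QueryMainExecutionEngine(pUVM, &bEngine);
 *     if (RT_SUCCESS(rc))
 *         LogRel(("Main execution engine: %#x (%s)\n", bEngine,
 *                 bEngine == VM_EXEC_ENGINE_HW_VIRT ? "HM" : bEngine == VM_EXEC_ENGINE_NATIVE_API ? "NEM" : "other"));
 */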
664
665/**
666 * Raise a fatal error.
667 *
668 * Safely terminate the VM with full state report and stuff. This function
669 * will naturally never return.
670 *
671 * @param pVCpu The cross context virtual CPU structure.
672 * @param rc VBox status code.
673 */
674VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
675{
676 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
677 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
678}
679
680
681#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
682/**
683 * Gets the EM state name.
684 *
685 * @returns pointer to read-only state name.
686 * @param enmState The state.
687 */
688static const char *emR3GetStateName(EMSTATE enmState)
689{
690 switch (enmState)
691 {
692 case EMSTATE_NONE: return "EMSTATE_NONE";
693 case EMSTATE_RAW: return "EMSTATE_RAW";
694 case EMSTATE_HM: return "EMSTATE_HM";
695 case EMSTATE_IEM: return "EMSTATE_IEM";
696 case EMSTATE_REM: return "EMSTATE_REM";
697 case EMSTATE_HALTED: return "EMSTATE_HALTED";
698 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
699 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
700 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
701 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
702 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
703 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
704 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
705 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
706 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
707 case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
708 case EMSTATE_NEM: return "EMSTATE_NEM";
709 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
710 default: return "Unknown!";
711 }
712}
713#endif /* LOG_ENABLED || VBOX_STRICT */
714
715
716/**
717 * Handle pending ring-3 I/O port write.
718 *
719 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
720 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
721 *
722 * @returns Strict VBox status code.
723 * @param pVM The cross context VM structure.
724 * @param pVCpu The cross context virtual CPU structure.
725 */
726VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
727{
728 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
729
730 /* Get and clear the pending data. */
731 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
732 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
733 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
734 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
735 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
736
737 /* Assert sanity. */
738 switch (cbValue)
739 {
740 case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
741 case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
742 case 4: break;
743 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
744 }
745 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
746
747 /* Do the work.*/
748 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
749 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
750 if (IOM_SUCCESS(rcStrict))
751 {
752 pVCpu->cpum.GstCtx.rip += cbInstr;
753 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
754 }
755 return rcStrict;
756}
757
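/*
 * Flow note: the setter side runs in ring-0/raw-mode context.  When IOM cannot
 * complete an OUT there, EMRZSetPendingIoPortWrite() stashes the port, value,
 * access size and instruction length in pVCpu->em.s.PendingIoPortAccess, and
 * the VINF_EM_PENDING_R3_IOPORT_WRITE status drops the EMT back to ring-3,
 * where the EM loop calls emR3ExecutePendingIoPortWrite() above to perform the
 * write and advance RIP past the instruction.  emR3ExecutePendingIoPortRead()
 * below is the IN-side counterpart.
 */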
758
759/**
760 * Handle pending ring-3 I/O port read.
761 *
762 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
763 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
764 *
765 * @returns Strict VBox status code.
766 * @param pVM The cross context VM structure.
767 * @param pVCpu The cross context virtual CPU structure.
768 */
769VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
770{
771 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
772
773 /* Get and clear the pending data. */
774 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
775 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
776 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
777 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
778
779 /* Assert sanity. */
780 switch (cbValue)
781 {
782 case 1: break;
783 case 2: break;
784 case 4: break;
785 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
786 }
787 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
788 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
789
790 /* Do the work.*/
791 uint32_t uValue = 0;
792 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
793 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
794 if (IOM_SUCCESS(rcStrict))
795 {
796 if (cbValue == 4)
797 pVCpu->cpum.GstCtx.rax = uValue;
798 else if (cbValue == 2)
799 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
800 else
801 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
802 pVCpu->cpum.GstCtx.rip += cbInstr;
803 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
804 }
805 return rcStrict;
806}
807
808
809/**
810 * Debug loop.
811 *
812 * @returns VBox status code for EM.
813 * @param pVM The cross context VM structure.
814 * @param pVCpu The cross context virtual CPU structure.
815 * @param rc Current EM VBox status code.
816 */
817static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
818{
819 for (;;)
820 {
821 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
822 const VBOXSTRICTRC rcLast = rc;
823
824 /*
825 * Debug related RC.
826 */
827 switch (VBOXSTRICTRC_VAL(rc))
828 {
829 /*
830 * Single step an instruction.
831 */
832 case VINF_EM_DBG_STEP:
833 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
834 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
835 AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
836 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
837 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
838 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
839 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
840#ifdef VBOX_WITH_REM
841 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
842 rc = emR3RemStep(pVM, pVCpu);
843#endif
844 else
845 {
846 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
847 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
848 rc = VINF_EM_DBG_STEPPED;
849 }
850 break;
851
852 /*
853 * Simple events: stepped, breakpoint, stop/assertion.
854 */
855 case VINF_EM_DBG_STEPPED:
856 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
857 break;
858
859 case VINF_EM_DBG_BREAKPOINT:
860 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
861 break;
862
863 case VINF_EM_DBG_STOP:
864 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
865 break;
866
867 case VINF_EM_DBG_EVENT:
868 rc = DBGFR3EventHandlePending(pVM, pVCpu);
869 break;
870
871 case VINF_EM_DBG_HYPER_STEPPED:
872 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
873 break;
874
875 case VINF_EM_DBG_HYPER_BREAKPOINT:
876 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
877 break;
878
879 case VINF_EM_DBG_HYPER_ASSERTION:
880 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
881 RTLogFlush(NULL);
882 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
883 break;
884
885 /*
886 * Guru meditation.
887 */
888 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
889 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
890 break;
891 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
892 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
893 break;
894 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
895 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
896 break;
897
898 default: /** @todo don't use default for guru, but make special errors code! */
899 {
900 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
901 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
902 break;
903 }
904 }
905
906 /*
907 * Process the result.
908 */
909 switch (VBOXSTRICTRC_VAL(rc))
910 {
911 /*
912 * Continue the debugging loop.
913 */
914 case VINF_EM_DBG_STEP:
915 case VINF_EM_DBG_STOP:
916 case VINF_EM_DBG_EVENT:
917 case VINF_EM_DBG_STEPPED:
918 case VINF_EM_DBG_BREAKPOINT:
919 case VINF_EM_DBG_HYPER_STEPPED:
920 case VINF_EM_DBG_HYPER_BREAKPOINT:
921 case VINF_EM_DBG_HYPER_ASSERTION:
922 break;
923
924 /*
925 * Resuming execution (in some form) has to be done here if we got
926 * a hypervisor debug event.
927 */
928 case VINF_SUCCESS:
929 case VINF_EM_RESUME:
930 case VINF_EM_SUSPEND:
931 case VINF_EM_RESCHEDULE:
932 case VINF_EM_RESCHEDULE_RAW:
933 case VINF_EM_RESCHEDULE_REM:
934 case VINF_EM_HALT:
935 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
936 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
937 if (rc == VINF_SUCCESS)
938 rc = VINF_EM_RESCHEDULE;
939 return rc;
940
941 /*
942 * The debugger isn't attached.
943 * We'll simply turn the thing off since that's the easiest thing to do.
944 */
945 case VERR_DBGF_NOT_ATTACHED:
946 switch (VBOXSTRICTRC_VAL(rcLast))
947 {
948 case VINF_EM_DBG_HYPER_STEPPED:
949 case VINF_EM_DBG_HYPER_BREAKPOINT:
950 case VINF_EM_DBG_HYPER_ASSERTION:
951 case VERR_TRPM_PANIC:
952 case VERR_TRPM_DONT_PANIC:
953 case VERR_VMM_RING0_ASSERTION:
954 case VERR_VMM_HYPER_CR3_MISMATCH:
955 case VERR_VMM_RING3_CALL_DISABLED:
956 return rcLast;
957 }
958 return VINF_EM_OFF;
959
960 /*
961 * Status codes terminating the VM in one or another sense.
962 */
963 case VINF_EM_TERMINATE:
964 case VINF_EM_OFF:
965 case VINF_EM_RESET:
966 case VINF_EM_NO_MEMORY:
967 case VINF_EM_RAW_STALE_SELECTOR:
968 case VINF_EM_RAW_IRET_TRAP:
969 case VERR_TRPM_PANIC:
970 case VERR_TRPM_DONT_PANIC:
971 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
972 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
973 case VERR_VMM_RING0_ASSERTION:
974 case VERR_VMM_HYPER_CR3_MISMATCH:
975 case VERR_VMM_RING3_CALL_DISABLED:
976 case VERR_INTERNAL_ERROR:
977 case VERR_INTERNAL_ERROR_2:
978 case VERR_INTERNAL_ERROR_3:
979 case VERR_INTERNAL_ERROR_4:
980 case VERR_INTERNAL_ERROR_5:
981 case VERR_IPE_UNEXPECTED_STATUS:
982 case VERR_IPE_UNEXPECTED_INFO_STATUS:
983 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
984 return rc;
985
986 /*
987 * The rest is unexpected, and will keep us here.
988 */
989 default:
990 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
991 break;
992 }
993 } /* debug for ever */
994}
995
996
997#if defined(VBOX_WITH_REM) || defined(DEBUG)
998/**
999 * Steps recompiled code.
1000 *
1001 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
1002 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1003 *
1004 * @param pVM The cross context VM structure.
1005 * @param pVCpu The cross context virtual CPU structure.
1006 */
1007static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
1008{
1009 Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1010
1011# ifdef VBOX_WITH_REM
1012 EMRemLock(pVM);
1013
1014 /*
1015 * Switch to REM, step instruction, switch back.
1016 */
1017 int rc = REMR3State(pVM, pVCpu);
1018 if (RT_SUCCESS(rc))
1019 {
1020 rc = REMR3Step(pVM, pVCpu);
1021 REMR3StateBack(pVM, pVCpu);
1022 }
1023 EMRemUnlock(pVM);
1024
1025# else
1026 int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
1027# endif
1028
1029 Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1030 return rc;
1031}
1032#endif /* VBOX_WITH_REM || DEBUG */
1033
1034
1035#ifdef VBOX_WITH_REM
1036/**
1037 * emR3RemExecute helper that syncs the state back from REM and leaves the REM
1038 * critical section.
1039 *
1040 * @returns false - new fInREMState value.
1041 * @param pVM The cross context VM structure.
1042 * @param pVCpu The cross context virtual CPU structure.
1043 */
1044DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
1045{
1046 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
1047 REMR3StateBack(pVM, pVCpu);
1048 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
1049
1050 EMRemUnlock(pVM);
1051 return false;
1052}
1053#endif
1054
1055
1056/**
1057 * Executes recompiled code.
1058 *
1059 * This function contains the recompiler version of the inner
1060 * execution loop (the outer loop being in EMR3ExecuteVM()).
1061 *
1062 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1063 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1064 *
1065 * @param pVM The cross context VM structure.
1066 * @param pVCpu The cross context virtual CPU structure.
1067 * @param pfFFDone Where to store an indicator telling whether or not
1068 * FFs were done before returning.
1069 *
1070 */
1071static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1072{
1073#ifdef LOG_ENABLED
1074 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1075
1076 if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
1077 Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
1078 else
1079 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
1080#endif
1081 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1082
1083#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1084 AssertMsg( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1085 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1086 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1087#endif
1088
1089 /*
1090 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1091 * or the REM suggests raw-mode execution.
1092 */
1093 *pfFFDone = false;
1094#ifdef VBOX_WITH_REM
1095 bool fInREMState = false;
1096#else
1097 uint32_t cLoops = 0;
1098#endif
1099 int rc = VINF_SUCCESS;
1100 for (;;)
1101 {
1102#ifdef VBOX_WITH_REM
1103 /*
1104 * Lock REM and update the state if not already in sync.
1105 *
1106 * Note! Big lock, but you are not supposed to own any lock when
1107 * coming in here.
1108 */
1109 if (!fInREMState)
1110 {
1111 EMRemLock(pVM);
1112 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1113
1114 /* Flush the recompiler translation blocks if the VCPU has changed,
1115 also force a full CPU state resync. */
1116 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1117 {
1118 REMFlushTBs(pVM);
1119 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1120 }
1121 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1122
1123 rc = REMR3State(pVM, pVCpu);
1124
1125 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1126 if (RT_FAILURE(rc))
1127 break;
1128 fInREMState = true;
1129
1130 /*
1131 * We might have missed the raising of VMREQ, TIMER and some other
1132 * important FFs while we were busy switching the state. So, check again.
1133 */
1134 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1135 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1136 {
1137 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1138 goto l_REMDoForcedActions;
1139 }
1140 }
1141#endif
1142
1143 /*
1144 * Execute REM.
1145 */
1146 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1147 {
1148 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1149#ifdef VBOX_WITH_REM
1150 rc = REMR3Run(pVM, pVCpu);
1151#else
1152 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 8192 /*cMaxInstructions*/, 4095 /*cPollRate*/, NULL /*pcInstructions*/));
1153#endif
1154 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1155 }
1156 else
1157 {
1158 /* Give up this time slice; virtual time continues */
1159 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1160 RTThreadSleep(5);
1161 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1162 rc = VINF_SUCCESS;
1163 }
1164
1165 /*
1166 * Deal with high priority post execution FFs before doing anything
1167 * else. Sync back the state and leave the lock to be on the safe side.
1168 */
1169 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1170 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1171 {
1172#ifdef VBOX_WITH_REM
1173 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1174#endif
1175 rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
1176 }
1177
1178 /*
1179 * Process the returned status code.
1180 */
1181 if (rc != VINF_SUCCESS)
1182 {
1183 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1184 break;
1185 if (rc != VINF_REM_INTERRUPED_FF)
1186 {
1187#ifndef VBOX_WITH_REM
1188 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1189 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1190 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1191 {
1192 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1193 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1194 {
1195 rc = VINF_EM_RESCHEDULE;
1196 break;
1197 }
1198 }
1199#endif
1200
1201 /*
1202 * Anything which is not known to us means an internal error
1203 * and the termination of the VM!
1204 */
1205 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1206 break;
1207 }
1208 }
1209
1210
1211 /*
1212 * Check and execute forced actions.
1213 *
1214 * Sync back the VM state and leave the lock before calling any of
1215 * these, you never know what's going to happen here.
1216 */
1217#ifdef VBOX_HIGH_RES_TIMERS_HACK
1218 TMTimerPollVoid(pVM, pVCpu);
1219#endif
1220 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1221 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1222 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK) )
1223 {
1224#ifdef VBOX_WITH_REM
1225l_REMDoForcedActions:
1226 if (fInREMState)
1227 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1228#endif
1229 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1230 rc = emR3ForcedActions(pVM, pVCpu, rc);
1231 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1232 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1233 if ( rc != VINF_SUCCESS
1234 && rc != VINF_EM_RESCHEDULE_REM)
1235 {
1236 *pfFFDone = true;
1237 break;
1238 }
1239 }
1240
1241#ifndef VBOX_WITH_REM
1242 /*
1243 * Have to check if we can get back to fast execution mode every so often.
1244 */
1245 if (!(++cLoops & 7))
1246 {
1247 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu);
1248 if ( enmCheck != EMSTATE_REM
1249 && enmCheck != EMSTATE_IEM_THEN_REM)
1250 return VINF_EM_RESCHEDULE;
1251 }
1252#endif
1253
1254 } /* The Inner Loop, recompiled execution mode version. */
1255
1256
1257#ifdef VBOX_WITH_REM
1258 /*
1259 * Returning. Sync back the VM state if required.
1260 */
1261 if (fInREMState)
1262 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1263#endif
1264
1265 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1266 return rc;
1267}
1268
1269
1270#ifdef DEBUG
1271
1272int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1273{
1274 EMSTATE enmOldState = pVCpu->em.s.enmState;
1275
1276 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1277
1278 Log(("Single step BEGIN:\n"));
1279 for (uint32_t i = 0; i < cIterations; i++)
1280 {
1281 DBGFR3PrgStep(pVCpu);
1282 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1283 emR3RemStep(pVM, pVCpu);
1284 if (emR3Reschedule(pVM, pVCpu) != EMSTATE_REM)
1285 break;
1286 }
1287 Log(("Single step END:\n"));
1288 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1289 pVCpu->em.s.enmState = enmOldState;
1290 return VINF_EM_RESCHEDULE;
1291}
1292
1293#endif /* DEBUG */
1294
1295
1296/**
1297 * Try to execute the problematic code in IEM first, then fall back on REM if there
1298 * is too much of it or if IEM doesn't implement something.
1299 *
1300 * @returns Strict VBox status code from IEMExecLots.
1301 * @param pVM The cross context VM structure.
1302 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1303 * @param pfFFDone Force flags done indicator.
1304 *
1305 * @thread EMT(pVCpu)
1306 */
1307static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1308{
1309 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1310 *pfFFDone = false;
1311
1312 /*
1313 * Execute in IEM for a while.
1314 */
1315 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1316 {
1317 uint32_t cInstructions;
1318 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 1024 - pVCpu->em.s.cIemThenRemInstructions /*cMaxInstructions*/,
1319 UINT32_MAX/2 /*cPollRate*/, &cInstructions);
1320 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1321 if (rcStrict != VINF_SUCCESS)
1322 {
1323 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1324 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1325 break;
1326
1327 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1328 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1329 return rcStrict;
1330 }
1331
1332 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1333 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1334 {
1335 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1336 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1337 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1338 pVCpu->em.s.enmState = enmNewState;
1339 return VINF_SUCCESS;
1340 }
1341
1342 /*
1343 * Check for pending actions.
1344 */
1345 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1346 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1347 return VINF_SUCCESS;
1348 }
1349
1350 /*
1351 * Switch to REM.
1352 */
1353 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1354 pVCpu->em.s.enmState = EMSTATE_REM;
1355 return VINF_SUCCESS;
1356}
1357
1358
1359/**
1360 * Decides whether to execute RAW, HWACC or REM.
1361 *
1362 * @returns new EM state
1363 * @param pVM The cross context VM structure.
1364 * @param pVCpu The cross context virtual CPU structure.
1365 */
1366EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu)
1367{
1368 /*
1369 * We stay in the wait for SIPI state unless explicitly told otherwise.
1370 */
1371 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1372 return EMSTATE_WAIT_SIPI;
1373
1374 /*
1375 * Execute everything in IEM?
1376 */
1377 if (pVM->em.s.fIemExecutesAll)
1378 return EMSTATE_IEM;
1379
1380 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1381 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1382 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1383
1384 X86EFLAGS EFlags = pVCpu->cpum.GstCtx.eflags;
1385 if (!VM_IS_RAW_MODE_ENABLED(pVM))
1386 {
1387 if (VM_IS_HM_ENABLED(pVM))
1388 {
1389 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
1390 return EMSTATE_HM;
1391 }
1392 else if (NEMR3CanExecuteGuest(pVM, pVCpu))
1393 return EMSTATE_NEM;
1394
1395 /*
1396 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1397 * turns off monitoring features essential for raw mode!
1398 */
1399 return EMSTATE_IEM_THEN_REM;
1400 }
1401
1402 /*
1403 * Standard raw-mode:
1404 *
1405 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1406 * or 32 bits protected mode ring 0 code
1407 *
1408 * The tests are ordered by the likelihood of being true during normal execution.
1409 */
1410 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1411 {
1412 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1413 return EMSTATE_REM;
1414 }
1415
1416# ifndef VBOX_RAW_V86
1417 if (EFlags.u32 & X86_EFL_VM) {
1418 Log2(("raw mode refused: VM_MASK\n"));
1419 return EMSTATE_REM;
1420 }
1421# endif
1422
1423 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
1424 uint32_t u32CR0 = pVCpu->cpum.GstCtx.cr0;
1425 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1426 {
1427 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1428 return EMSTATE_REM;
1429 }
1430
1431 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
1432 {
1433 uint32_t u32Dummy, u32Features;
1434
1435 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1436 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1437 return EMSTATE_REM;
1438 }
1439
1440 unsigned uSS = pVCpu->cpum.GstCtx.ss.Sel;
1441 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
1442 || (uSS & X86_SEL_RPL) == 3)
1443 {
1444 if (!(EFlags.u32 & X86_EFL_IF))
1445 {
1446 Log2(("raw mode refused: IF (RawR3)\n"));
1447 return EMSTATE_REM;
1448 }
1449
1450 if (!(u32CR0 & X86_CR0_WP))
1451 {
1452 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1453 return EMSTATE_REM;
1454 }
1455 }
1456 else
1457 {
1458 /* Only ring 0 supervisor code. */
1459 if ((uSS & X86_SEL_RPL) != 0)
1460 {
1461 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1462 return EMSTATE_REM;
1463 }
1464
1465 // Let's start with pure 32 bits ring 0 code first
1466 /** @todo What's pure 32-bit mode? flat? */
1467 if ( !(pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1468 || !(pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig))
1469 {
1470 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1471 return EMSTATE_REM;
1472 }
1473
1474 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1475 if (!(u32CR0 & X86_CR0_WP))
1476 {
1477 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1478 return EMSTATE_REM;
1479 }
1480
1481# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1482 if (!(EFlags.u32 & X86_EFL_IF))
1483 {
1484 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1485 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1486 return EMSTATE_REM;
1487 }
1488# endif
1489
1490# ifndef VBOX_WITH_RAW_RING1
1491 /** @todo still necessary??? */
1492 if (EFlags.Bits.u2IOPL != 0)
1493 {
1494 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1495 return EMSTATE_REM;
1496 }
1497# endif
1498 }
1499
1500 /*
1501 * Stale hidden selectors means raw-mode is unsafe (being very careful).
1502 */
1503 if (pVCpu->cpum.GstCtx.cs.fFlags & CPUMSELREG_FLAGS_STALE)
1504 {
1505 Log2(("raw mode refused: stale CS\n"));
1506 return EMSTATE_REM;
1507 }
1508 if (pVCpu->cpum.GstCtx.ss.fFlags & CPUMSELREG_FLAGS_STALE)
1509 {
1510 Log2(("raw mode refused: stale SS\n"));
1511 return EMSTATE_REM;
1512 }
1513 if (pVCpu->cpum.GstCtx.ds.fFlags & CPUMSELREG_FLAGS_STALE)
1514 {
1515 Log2(("raw mode refused: stale DS\n"));
1516 return EMSTATE_REM;
1517 }
1518 if (pVCpu->cpum.GstCtx.es.fFlags & CPUMSELREG_FLAGS_STALE)
1519 {
1520 Log2(("raw mode refused: stale ES\n"));
1521 return EMSTATE_REM;
1522 }
1523 if (pVCpu->cpum.GstCtx.fs.fFlags & CPUMSELREG_FLAGS_STALE)
1524 {
1525 Log2(("raw mode refused: stale FS\n"));
1526 return EMSTATE_REM;
1527 }
1528 if (pVCpu->cpum.GstCtx.gs.fFlags & CPUMSELREG_FLAGS_STALE)
1529 {
1530 Log2(("raw mode refused: stale GS\n"));
1531 return EMSTATE_REM;
1532 }
1533
1534# ifdef VBOX_WITH_SAFE_STR
1535 if (pVCpu->cpum.GstCtx.tr.Sel == 0)
1536 {
1537 Log(("Raw mode refused -> TR=0\n"));
1538 return EMSTATE_REM;
1539 }
1540# endif
1541
1542 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1543 return EMSTATE_RAW;
1544}
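/*
 * Illustrative summary, not part of the original file: the early returns above
 * cover TF/VM in EFLAGS, the PG/PE bits in CR0, PAE support, the IF/WP
 * requirements for ring-3 vs. ring-0 code, stale hidden selectors and an unset
 * TR, so the raw-mode branch of the scheduling decision boils down to a
 * predicate of roughly this shape (both names below are hypothetical
 * placeholders, shown only to make the structure explicit):
 */
#if 0 /* sketch only */
EMSTATE emR3SketchPickRawOrRem(PVMCPU pVCpu)
{
    if (emR3SketchIsRawModeSafe(pVCpu))     /* i.e. none of the refusal conditions above hit */
        return EMSTATE_RAW;
    return EMSTATE_REM;                     /* otherwise fall back to the recompiler */
}
#endif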
1545
1546
1547/**
1548 * Executes all high priority post execution force actions.
1549 *
1550 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1551 * fatal error status code.
1552 *
1553 * @param pVM The cross context VM structure.
1554 * @param pVCpu The cross context virtual CPU structure.
1555 * @param rc The current strict VBox status code rc.
1556 */
1557VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1558{
1559 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1560
1561 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1562 PDMCritSectBothFF(pVCpu);
1563
1564 /* Update CR3 (Nested Paging case for HM). */
1565 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1566 {
1567 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1568 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1569 if (RT_FAILURE(rc2))
1570 return rc2;
1571 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1572 }
1573
1574 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
1575 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1576 {
1577 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1578 if (CPUMIsGuestInPAEMode(pVCpu))
1579 {
1580 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1581 AssertPtr(pPdpes);
1582
1583 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1584 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1585 }
1586 else
1587 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1588 }
1589
1590 /* IEM has pending work (typically memory write after INS instruction). */
1591 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1592 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1593
 1594 /* IOM has pending work (committing an I/O or MMIO write). */
1595 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1596 {
1597 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1598 if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
1599 { /* half likely, or at least it's a line shorter. */ }
1600 else if (rc == VINF_SUCCESS)
1601 rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
1602 else
1603 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
1604 }
1605
1606 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1607 {
1608 if ( rc > VINF_EM_NO_MEMORY
1609 && rc <= VINF_EM_LAST)
1610 rc = VINF_EM_NO_MEMORY;
1611 }
1612
1613 return rc;
1614}
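/*
 * Illustrative usage sketch, not part of the original file: the inner execution
 * loops in this file feed their current strict status code through
 * emR3HighPriorityPostForcedActions() right after returning from guest
 * execution, so a post-execution force flag can upgrade the status.  The loop
 * shape below is simplified and the function name is a made-up placeholder:
 */
#if 0 /* sketch only */
static VBOXSTRICTRC emR3SketchPostProcessExit(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    /* Guest code has just been executed; rcStrict holds the exit status. */
    if (   VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
        || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
        rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif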
1615
1616
1617/**
1618 * Helper for emR3ForcedActions() for VMX external interrupt VM-exit.
1619 *
1620 * @returns VBox status code.
1621 * @retval VINF_NO_CHANGE if the VMX external interrupt intercept was not active.
1622 * @param pVCpu The cross context virtual CPU structure.
1623 */
1624static int emR3VmxNstGstIntrIntercept(PVMCPU pVCpu)
1625{
1626#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1627 /* Handle the "external interrupt" VM-exit intercept. */
1628 if ( CPUMIsGuestVmxPinCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
1629 && !CPUMIsGuestVmxExitCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
1630 {
1631 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
1632 AssertMsg( rcStrict != VINF_PGM_CHANGE_MODE
1633 && rcStrict != VINF_VMX_VMEXIT
1634 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1635 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1636 return VBOXSTRICTRC_TODO(rcStrict);
1637 }
1638#else
1639 RT_NOREF(pVCpu);
1640#endif
1641 return VINF_NO_CHANGE;
1642}
1643
1644
1645/**
1646 * Helper for emR3ForcedActions() for SVM interrupt intercept.
1647 *
1648 * @returns VBox status code.
1649 * @retval VINF_NO_CHANGE if the SVM external interrupt intercept was not active.
1650 * @param pVCpu The cross context virtual CPU structure.
1651 */
1652static int emR3SvmNstGstIntrIntercept(PVMCPU pVCpu)
1653{
1654#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1655 /* Handle the physical interrupt intercept (can be masked by the guest hypervisor). */
1656 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
1657 {
1658 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1659 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1660 if (RT_SUCCESS(rcStrict))
1661 {
1662 AssertMsg( rcStrict != VINF_PGM_CHANGE_MODE
1663 && rcStrict != VINF_SVM_VMEXIT
1664 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1665 return VBOXSTRICTRC_VAL(rcStrict);
1666 }
1667
1668 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1669 return VINF_EM_TRIPLE_FAULT;
1670 }
1671#else
1672 NOREF(pVCpu);
1673#endif
1674 return VINF_NO_CHANGE;
1675}
1676
1677
1678/**
1679 * Helper for emR3ForcedActions() for SVM virtual interrupt intercept.
1680 *
1681 * @returns VBox status code.
1682 * @retval VINF_NO_CHANGE if the SVM virtual interrupt intercept was not active.
1683 * @param pVCpu The cross context virtual CPU structure.
1684 */
1685static int emR3SvmNstGstVirtIntrIntercept(PVMCPU pVCpu)
1686{
1687#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1688 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
1689 {
1690 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1691 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1692 if (RT_SUCCESS(rcStrict))
1693 {
1694 Assert(rcStrict != VINF_PGM_CHANGE_MODE);
1695 Assert(rcStrict != VINF_SVM_VMEXIT);
1696 return VBOXSTRICTRC_VAL(rcStrict);
1697 }
1698 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1699 return VINF_EM_TRIPLE_FAULT;
1700 }
1701#else
1702 NOREF(pVCpu);
1703#endif
1704 return VINF_NO_CHANGE;
1705}
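/*
 * Illustrative sketch, not part of the original file: the three nested-guest
 * intercept helpers above share one contract -- VINF_NO_CHANGE means "no
 * VM-exit/#VMEXIT was triggered, deliver the event to the (nested-)guest",
 * anything else is simply propagated.  The external interrupt delivery code
 * further down in emR3ForcedActions() consumes them roughly like this (the
 * wrapper name is a made-up placeholder):
 */
#if 0 /* sketch only */
static int emR3SketchDeliverExtInt(PVM pVM, PVMCPU pVCpu, bool fInVmxNonRootMode, bool fInSvmHwvirtMode)
{
    int rc2 = fInVmxNonRootMode ? emR3VmxNstGstIntrIntercept(pVCpu)
            : fInSvmHwvirtMode  ? emR3SvmNstGstIntrIntercept(pVCpu)
            :                     VINF_NO_CHANGE;
    if (rc2 == VINF_NO_CHANGE)
    {
        bool fInjected = false;
        rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT, &fInjected);
    }
    return rc2;
}
#endif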
1706
1707
1708/**
1709 * Executes all pending forced actions.
1710 *
1711 * Forced actions can cause execution delays and execution
 1712 * rescheduling. The former we deal with using action priority, so
 1713 * that, for instance, pending timers aren't scheduled and run until
 1714 * right before execution. The rescheduling we deal with using
1715 * return codes. The same goes for VM termination, only in that case
1716 * we exit everything.
1717 *
1718 * @returns VBox status code of equal or greater importance/severity than rc.
1719 * The most important ones are: VINF_EM_RESCHEDULE,
1720 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1721 *
1722 * @param pVM The cross context VM structure.
1723 * @param pVCpu The cross context virtual CPU structure.
1724 * @param rc The current rc.
1725 *
1726 */
1727int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1728{
1729 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1730#ifdef VBOX_STRICT
1731 int rcIrq = VINF_SUCCESS;
1732#endif
1733 int rc2;
1734#define UPDATE_RC() \
1735 do { \
1736 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1737 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1738 break; \
1739 if (!rc || rc2 < rc) \
1740 rc = rc2; \
1741 } while (0)
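    /*
     * Explanatory note, not in the original file: EM status codes are ordered
     * so that numerically lower VINF_EM_* values carry higher priority.  The
     * macro above therefore keeps any error (negative) rc untouched, ignores
     * rc2 == VINF_SUCCESS, and otherwise lets the lower (more important) of
     * the two informational codes win.  For example, if rc is currently
     * VINF_EM_RESCHEDULE and a handler returns VINF_EM_OFF, rc becomes
     * VINF_EM_OFF and a later VINF_EM_RESCHEDULE can no longer downgrade it.
     */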
1742 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1743
1744 /*
1745 * Post execution chunk first.
1746 */
1747 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1748 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1749 {
1750 /*
1751 * EMT Rendezvous (must be serviced before termination).
1752 */
1753 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1754 {
1755 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1756 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1757 UPDATE_RC();
1758 /** @todo HACK ALERT! The following test is to make sure EM+TM
1759 * thinks the VM is stopped/reset before the next VM state change
1760 * is made. We need a better solution for this, or at least make it
1761 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1762 * VINF_EM_SUSPEND). */
1763 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1764 {
1765 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1766 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1767 return rc;
1768 }
1769 }
1770
1771 /*
1772 * State change request (cleared by vmR3SetStateLocked).
1773 */
1774 if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
1775 {
1776 VMSTATE enmState = VMR3GetState(pVM);
1777 switch (enmState)
1778 {
1779 case VMSTATE_FATAL_ERROR:
1780 case VMSTATE_FATAL_ERROR_LS:
1781 case VMSTATE_GURU_MEDITATION:
1782 case VMSTATE_GURU_MEDITATION_LS:
1783 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1784 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1785 return VINF_EM_SUSPEND;
1786
1787 case VMSTATE_DESTROYING:
1788 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1789 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1790 return VINF_EM_TERMINATE;
1791
1792 default:
1793 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1794 }
1795 }
1796
1797 /*
1798 * Debugger Facility polling.
1799 */
1800 if ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1801 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1802 {
1803 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1804 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1805 UPDATE_RC();
1806 }
1807
1808 /*
1809 * Postponed reset request.
1810 */
1811 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1812 {
1813 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1814 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1815 UPDATE_RC();
1816 }
1817
1818 /*
1819 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1820 */
1821 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1822 {
1823 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1824 UPDATE_RC();
1825 if (rc == VINF_EM_NO_MEMORY)
1826 return rc;
1827 }
1828
1829 /* check that we got them all */
1830 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1831 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_DBGF);
1832 }
1833
1834 /*
1835 * Normal priority then.
1836 * (Executed in no particular order.)
1837 */
1838 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1839 {
1840 /*
1841 * PDM Queues are pending.
1842 */
1843 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1844 PDMR3QueueFlushAll(pVM);
1845
1846 /*
1847 * PDM DMA transfers are pending.
1848 */
1849 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1850 PDMR3DmaRun(pVM);
1851
1852 /*
1853 * EMT Rendezvous (make sure they are handled before the requests).
1854 */
1855 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1856 {
1857 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1858 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1859 UPDATE_RC();
1860 /** @todo HACK ALERT! The following test is to make sure EM+TM
1861 * thinks the VM is stopped/reset before the next VM state change
1862 * is made. We need a better solution for this, or at least make it
1863 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1864 * VINF_EM_SUSPEND). */
1865 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1866 {
1867 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1868 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1869 return rc;
1870 }
1871 }
1872
1873 /*
1874 * Requests from other threads.
1875 */
1876 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1877 {
1878 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1879 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1880 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1881 {
1882 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1883 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1884 return rc2;
1885 }
1886 UPDATE_RC();
1887 /** @todo HACK ALERT! The following test is to make sure EM+TM
1888 * thinks the VM is stopped/reset before the next VM state change
1889 * is made. We need a better solution for this, or at least make it
1890 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1891 * VINF_EM_SUSPEND). */
1892 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1893 {
1894 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1895 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1896 return rc;
1897 }
1898 }
1899
1900#ifdef VBOX_WITH_REM
1901 /* Replay the handler notification changes. */
1902 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
1903 {
1904 /* Try not to cause deadlocks. */
1905 if ( pVM->cCpus == 1
1906 || ( !PGMIsLockOwner(pVM)
1907 && !IOMIsLockWriteOwner(pVM))
1908 )
1909 {
1910 EMRemLock(pVM);
1911 REMR3ReplayHandlerNotifications(pVM);
1912 EMRemUnlock(pVM);
1913 }
1914 }
1915#endif
1916
1917 /* check that we got them all */
1918 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
1919 }
1920
1921 /*
1922 * Normal priority then. (per-VCPU)
1923 * (Executed in no particular order.)
1924 */
1925 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1926 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1927 {
1928 /*
1929 * Requests from other threads.
1930 */
1931 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
1932 {
1933 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1934 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1935 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1936 {
1937 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1938 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1939 return rc2;
1940 }
1941 UPDATE_RC();
1942 /** @todo HACK ALERT! The following test is to make sure EM+TM
1943 * thinks the VM is stopped/reset before the next VM state change
1944 * is made. We need a better solution for this, or at least make it
1945 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1946 * VINF_EM_SUSPEND). */
1947 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1948 {
1949 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1950 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1951 return rc;
1952 }
1953 }
1954
1955 /* check that we got them all */
1956 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
1957 }
1958
1959 /*
1960 * High priority pre execution chunk last.
1961 * (Executed in ascending priority order.)
1962 */
1963 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1964 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1965 {
1966 /*
1967 * Timers before interrupts.
1968 */
1969 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)
1970 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1971 TMR3TimerQueuesDo(pVM);
1972
1973 /*
1974 * Pick up asynchronously posted interrupts into the APIC.
1975 */
1976 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
1977 APICUpdatePendingInterrupts(pVCpu);
1978
1979 /*
1980 * The instruction following an emulated STI should *always* be executed!
1981 *
1982 * Note! We intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if
1983 * the eip is the same as the inhibited instr address. Before we
1984 * are able to execute this instruction in raw mode (iret to
1985 * guest code) an external interrupt might force a world switch
1986 * again. Possibly allowing a guest interrupt to be dispatched
1987 * in the process. This could break the guest. Sounds very
 1988 * unlikely, but such timing-sensitive problems are not as rare as
1989 * you might think.
1990 */
1991 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1992 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1993 {
1994 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
1995 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1996 {
1997 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1998 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1999 }
2000 else
2001 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
2002 }
2003
2004 /** @todo SMIs. If we implement SMIs, this is where they will have to be
2005 * delivered. */
2006
2007#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2008 /*
2009 * VMX Nested-guest APIC-write pending (can cause VM-exits).
2010 * Takes priority over even SMI and INIT signals.
2011 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
2012 */
2013 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
2014 {
2015 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu));
2016 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
2017 UPDATE_RC();
2018 }
2019
2020 /*
2021 * VMX Nested-guest monitor-trap flag (MTF) VM-exit.
2022 * Takes priority over "Traps on the previous instruction".
2023 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
2024 */
2025 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
2026 {
2027 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */));
2028 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
2029 UPDATE_RC();
2030 }
2031
2032 /*
2033 * VMX Nested-guest preemption timer VM-exit.
2034 * Takes priority over NMI-window VM-exits.
2035 */
2036 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
2037 {
2038 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu));
2039 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
2040 UPDATE_RC();
2041 }
2042#endif
2043
2044 /*
2045 * Guest event injection.
2046 */
2047 bool fWakeupPending = false;
2048 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
2049 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
2050 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) /* Interrupt shadows block both NMIs and interrupts. */
2051 && !TRPMHasTrap(pVCpu)) /* An event could already be scheduled for dispatching. */
2052 {
2053 bool fInVmxNonRootMode;
2054 bool fInSvmHwvirtMode;
2055 bool const fInNestedGuest = CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx);
2056 if (fInNestedGuest)
2057 {
2058 fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
2059 fInSvmHwvirtMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
2060 }
2061 else
2062 {
2063 fInVmxNonRootMode = false;
2064 fInSvmHwvirtMode = false;
2065 }
2066
2067 bool fGif = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
2068 if (fGif)
2069 {
2070#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2071 /*
2072 * VMX NMI-window VM-exit.
2073 * Takes priority over non-maskable interrupts (NMIs).
2074 * Interrupt shadows block NMI-window VM-exits.
2075 * Any event that is already in TRPM (e.g. injected during VM-entry) takes priority.
2076 *
2077 * See Intel spec. 25.2 "Other Causes Of VM Exits".
2078 * See Intel spec. 26.7.6 "NMI-Window Exiting".
2079 */
2080 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
2081 && !CPUMIsGuestVmxVirtNmiBlocking(pVCpu, &pVCpu->cpum.GstCtx))
2082 {
2083 Assert(CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
2084 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* uExitQual */));
2085 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
2086 && rc2 != VINF_PGM_CHANGE_MODE
2087 && rc2 != VINF_VMX_VMEXIT
2088 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
2089 UPDATE_RC();
2090 }
2091 else
2092#endif
2093 /*
2094 * NMIs (take priority over external interrupts).
2095 */
2096 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
2097 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2098 {
2099#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2100 if ( fInVmxNonRootMode
2101 && CPUMIsGuestVmxPinCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PIN_CTLS_NMI_EXIT))
2102 {
2103 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitXcptNmi(pVCpu));
2104 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
2105 UPDATE_RC();
2106 }
2107 else
2108#endif
2109#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2110 if ( fInSvmHwvirtMode
2111 && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_NMI))
2112 {
2113 rc2 = VBOXSTRICTRC_VAL(IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */));
2114 AssertMsg( rc2 != VINF_PGM_CHANGE_MODE
2115 && rc2 != VINF_SVM_VMEXIT
2116 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
2117 UPDATE_RC();
2118 }
2119 else
2120#endif
2121 {
2122 rc2 = TRPMAssertTrap(pVCpu, X86_XCPT_NMI, TRPM_TRAP);
2123 if (rc2 == VINF_SUCCESS)
2124 {
2125 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
2126 fWakeupPending = true;
2127 if (pVM->em.s.fIemExecutesAll)
2128 rc2 = VINF_EM_RESCHEDULE;
2129 else
2130 {
2131 rc2 = HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM
2132 : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
2133 : VINF_EM_RESCHEDULE_REM;
2134 }
2135 }
2136 UPDATE_RC();
2137 }
2138 }
2139#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2140 /*
2141 * VMX Interrupt-window VM-exits.
2142 * Takes priority over external interrupts.
2143 */
2144 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
2145 && CPUMIsGuestVmxVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
2146 {
2147 Assert(CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
2148 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* uExitQual */));
2149 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
2150 && rc2 != VINF_PGM_CHANGE_MODE
2151 && rc2 != VINF_VMX_VMEXIT
2152 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
2153 UPDATE_RC();
2154 }
2155#endif
2156#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2157 /** @todo NSTSVM: Handle this for SVM here too later not when an interrupt is
2158 * actually pending like we currently do. */
2159#endif
2160 /*
2161 * External interrupts.
2162 */
2163 else
2164 {
2165 /*
 2166 * VMX: virtual interrupts take priority over physical interrupts.
 2167 * SVM: physical interrupts take priority over virtual interrupts.
2168 */
2169 if ( fInVmxNonRootMode
2170 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
2171 && CPUMIsGuestVmxVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
2172 {
2173 /** @todo NSTVMX: virtual-interrupt delivery. */
2174 rc2 = VINF_SUCCESS;
2175 }
2176 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
2177 && CPUMIsGuestPhysIntrEnabled(pVCpu))
2178 {
2179 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
2180 if (fInVmxNonRootMode)
2181 rc2 = emR3VmxNstGstIntrIntercept(pVCpu);
2182 else if (fInSvmHwvirtMode)
2183 rc2 = emR3SvmNstGstIntrIntercept(pVCpu);
2184 else
2185 rc2 = VINF_NO_CHANGE;
2186
2187 if (rc2 == VINF_NO_CHANGE)
2188 {
2189 bool fInjected = false;
2190 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2191 /** @todo this really isn't nice, should properly handle this */
2192 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT, &fInjected);
2193 fWakeupPending = true;
2194 if ( pVM->em.s.fIemExecutesAll
2195 && ( rc2 == VINF_EM_RESCHEDULE_REM
2196 || rc2 == VINF_EM_RESCHEDULE_HM
2197 || rc2 == VINF_EM_RESCHEDULE_RAW))
2198 {
2199 rc2 = VINF_EM_RESCHEDULE;
2200 }
2201#ifdef VBOX_STRICT
2202 if (fInjected)
2203 rcIrq = rc2;
2204#endif
2205 }
2206 UPDATE_RC();
2207 }
2208 else if ( fInSvmHwvirtMode
2209 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
2210 && CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
2211 {
2212 rc2 = emR3SvmNstGstVirtIntrIntercept(pVCpu);
2213 if (rc2 == VINF_NO_CHANGE)
2214 {
2215 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
2216 uint8_t const uNstGstVector = CPUMGetGuestSvmVirtIntrVector(&pVCpu->cpum.GstCtx);
2217 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR %#x\n", uNstGstVector));
2218 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
2219 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
2220 rc2 = VINF_EM_RESCHEDULE;
2221#ifdef VBOX_STRICT
2222 rcIrq = rc2;
2223#endif
2224 }
2225 UPDATE_RC();
2226 }
2227 }
2228 }
2229 }
2230
2231 /*
2232 * Allocate handy pages.
2233 */
2234 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2235 {
2236 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2237 UPDATE_RC();
2238 }
2239
2240 /*
2241 * Debugger Facility request.
2242 */
2243 if ( ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
2244 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
2245 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) )
2246 {
2247 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2248 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2249 UPDATE_RC();
2250 }
2251
2252 /*
2253 * EMT Rendezvous (must be serviced before termination).
2254 */
2255 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2256 && VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
2257 {
2258 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2259 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2260 UPDATE_RC();
2261 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2262 * stopped/reset before the next VM state change is made. We need a better
2263 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2264 * && rc >= VINF_EM_SUSPEND). */
2265 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2266 {
2267 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2268 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2269 return rc;
2270 }
2271 }
2272
2273 /*
2274 * State change request (cleared by vmR3SetStateLocked).
2275 */
2276 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2277 && VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
2278 {
2279 VMSTATE enmState = VMR3GetState(pVM);
2280 switch (enmState)
2281 {
2282 case VMSTATE_FATAL_ERROR:
2283 case VMSTATE_FATAL_ERROR_LS:
2284 case VMSTATE_GURU_MEDITATION:
2285 case VMSTATE_GURU_MEDITATION_LS:
2286 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2287 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2288 return VINF_EM_SUSPEND;
2289
2290 case VMSTATE_DESTROYING:
2291 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2292 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2293 return VINF_EM_TERMINATE;
2294
2295 default:
2296 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2297 }
2298 }
2299
2300 /*
2301 * Out of memory? Since most of our fellow high priority actions may cause us
2302 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2303 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2304 * than us since we can terminate without allocating more memory.
2305 */
2306 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
2307 {
2308 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2309 UPDATE_RC();
2310 if (rc == VINF_EM_NO_MEMORY)
2311 return rc;
2312 }
2313
2314 /*
2315 * If the virtual sync clock is still stopped, make TM restart it.
2316 */
2317 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
2318 TMR3VirtualSyncFF(pVM, pVCpu);
2319
2320#ifdef DEBUG
2321 /*
2322 * Debug, pause the VM.
2323 */
2324 if (VM_FF_IS_SET(pVM, VM_FF_DEBUG_SUSPEND))
2325 {
2326 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2327 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2328 return VINF_EM_SUSPEND;
2329 }
2330#endif
2331
2332 /* check that we got them all */
2333 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2334 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
2335 }
2336
2337#undef UPDATE_RC
2338 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2339 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2340 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2341 return rc;
2342}
2343
2344
2345/**
 2346 * Check whether the configured CPU execution-time cap still allows guest code to be scheduled.
 2347 *
 2348 * @returns true if execution is allowed, false otherwise.
2349 * @param pVM The cross context VM structure.
2350 * @param pVCpu The cross context virtual CPU structure.
2351 */
2352bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2353{
2354 uint64_t u64UserTime, u64KernelTime;
2355
2356 if ( pVM->uCpuExecutionCap != 100
2357 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2358 {
2359 uint64_t u64TimeNow = RTTimeMilliTS();
2360 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2361 {
2362 /* New time slice. */
2363 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2364 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2365 pVCpu->em.s.u64TimeSliceExec = 0;
2366 }
2367 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2368
2369 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2370 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2371 return false;
2372 }
2373 return true;
2374}
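/*
 * Illustrative sketch, not part of the original file: the arithmetic above
 * grants each time slice a budget of EM_TIME_SLICE * uCpuExecutionCap / 100
 * milliseconds of combined kernel+user CPU time.  The helper below (a
 * hypothetical name, shown only to restate the formula) makes that explicit;
 * assuming a 100 ms slice and a 50% cap, the EMT may burn at most 50 ms per
 * slice before emR3IsExecutionAllowed() starts returning false.
 */
#if 0 /* sketch only */
static uint64_t emR3SketchSliceBudgetMs(uint64_t cMsSlice, uint32_t uCpuExecutionCap)
{
    return cMsSlice * uCpuExecutionCap / 100;   /* e.g. 100 * 50 / 100 = 50 ms */
}
#endif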
2375
2376
2377/**
2378 * Execute VM.
2379 *
2380 * This function is the main loop of the VM. The emulation thread
2381 * calls this function when the VM has been successfully constructed
2382 * and we're ready for executing the VM.
2383 *
2384 * Returning from this function means that the VM is turned off or
2385 * suspended (state already saved) and deconstruction is next in line.
2386 *
 2387 * All interaction from other threads is done using forced actions
2388 * and signalling of the wait object.
2389 *
2390 * @returns VBox status code, informational status codes may indicate failure.
2391 * @param pVM The cross context VM structure.
2392 * @param pVCpu The cross context virtual CPU structure.
2393 */
2394VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2395{
2396 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s)\n",
2397 pVM,
2398 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2399 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2400 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState) ));
2401 VM_ASSERT_EMT(pVM);
2402 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2403 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2404 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2405 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2406
2407 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2408 if (rc == 0)
2409 {
2410 /*
2411 * Start the virtual time.
2412 */
2413 TMR3NotifyResume(pVM, pVCpu);
2414
2415 /*
2416 * The Outer Main Loop.
2417 */
2418 bool fFFDone = false;
2419
2420 /* Reschedule right away to start in the right state. */
2421 rc = VINF_SUCCESS;
2422
2423 /* If resuming after a pause or a state load, restore the previous
2424 state or else we'll start executing code. Else, just reschedule. */
2425 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2426 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2427 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2428 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2429 else
2430 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
2431 pVCpu->em.s.cIemThenRemInstructions = 0;
2432 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2433
2434 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2435 for (;;)
2436 {
2437 /*
2438 * Before we can schedule anything (we're here because
2439 * scheduling is required) we must service any pending
2440 * forced actions to avoid any pending action causing
2441 * immediate rescheduling upon entering an inner loop
2442 *
2443 * Do forced actions.
2444 */
2445 if ( !fFFDone
2446 && RT_SUCCESS(rc)
2447 && rc != VINF_EM_TERMINATE
2448 && rc != VINF_EM_OFF
2449 && ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
2450 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2451 {
2452 rc = emR3ForcedActions(pVM, pVCpu, rc);
2453 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2454 }
2455 else if (fFFDone)
2456 fFFDone = false;
2457
2458 /*
2459 * Now what to do?
2460 */
2461 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2462 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2463 switch (rc)
2464 {
2465 /*
2466 * Keep doing what we're currently doing.
2467 */
2468 case VINF_SUCCESS:
2469 break;
2470
2471 /*
2472 * Reschedule - to raw-mode execution.
2473 */
2474/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2475 case VINF_EM_RESCHEDULE_RAW:
2476 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2477 if (VM_IS_RAW_MODE_ENABLED(pVM))
2478 {
2479 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2480 pVCpu->em.s.enmState = EMSTATE_RAW;
2481 }
2482 else
2483 {
2484 AssertLogRelFailed();
2485 pVCpu->em.s.enmState = EMSTATE_NONE;
2486 }
2487 break;
2488
2489 /*
2490 * Reschedule - to HM or NEM.
2491 */
2492 case VINF_EM_RESCHEDULE_HM:
2493 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2494 if (VM_IS_HM_ENABLED(pVM))
2495 {
2496 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2497 pVCpu->em.s.enmState = EMSTATE_HM;
2498 }
2499 else if (VM_IS_NEM_ENABLED(pVM))
2500 {
2501 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2502 pVCpu->em.s.enmState = EMSTATE_NEM;
2503 }
2504 else
2505 {
2506 AssertLogRelFailed();
2507 pVCpu->em.s.enmState = EMSTATE_NONE;
2508 }
2509 break;
2510
2511 /*
2512 * Reschedule - to recompiled execution.
2513 */
2514 case VINF_EM_RESCHEDULE_REM:
2515 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2516 if (!VM_IS_RAW_MODE_ENABLED(pVM))
2517 {
2518 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2519 enmOldState, EMSTATE_IEM_THEN_REM));
2520 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2521 {
2522 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2523 pVCpu->em.s.cIemThenRemInstructions = 0;
2524 }
2525 }
2526 else
2527 {
2528 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2529 pVCpu->em.s.enmState = EMSTATE_REM;
2530 }
2531 break;
2532
2533 /*
2534 * Resume.
2535 */
2536 case VINF_EM_RESUME:
2537 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2538 /* Don't reschedule in the halted or wait for SIPI case. */
2539 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2540 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2541 {
2542 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2543 break;
2544 }
2545 /* fall through and get scheduled. */
2546 RT_FALL_THRU();
2547
2548 /*
2549 * Reschedule.
2550 */
2551 case VINF_EM_RESCHEDULE:
2552 {
2553 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2554 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2555 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2556 pVCpu->em.s.cIemThenRemInstructions = 0;
2557 pVCpu->em.s.enmState = enmState;
2558 break;
2559 }
2560
2561 /*
2562 * Halted.
2563 */
2564 case VINF_EM_HALT:
2565 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2566 pVCpu->em.s.enmState = EMSTATE_HALTED;
2567 break;
2568
2569 /*
2570 * Switch to the wait for SIPI state (application processor only)
2571 */
2572 case VINF_EM_WAIT_SIPI:
2573 Assert(pVCpu->idCpu != 0);
2574 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2575 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2576 break;
2577
2578
2579 /*
2580 * Suspend.
2581 */
2582 case VINF_EM_SUSPEND:
2583 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2584 Assert(enmOldState != EMSTATE_SUSPENDED);
2585 pVCpu->em.s.enmPrevState = enmOldState;
2586 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2587 break;
2588
2589 /*
2590 * Reset.
2591 * We might end up doing a double reset for now, we'll have to clean up the mess later.
2592 */
2593 case VINF_EM_RESET:
2594 {
2595 if (pVCpu->idCpu == 0)
2596 {
2597 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2598 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2599 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2600 pVCpu->em.s.cIemThenRemInstructions = 0;
2601 pVCpu->em.s.enmState = enmState;
2602 }
2603 else
2604 {
2605 /* All other VCPUs go into the wait for SIPI state. */
2606 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2607 }
2608 break;
2609 }
2610
2611 /*
2612 * Power Off.
2613 */
2614 case VINF_EM_OFF:
2615 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2616 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2617 TMR3NotifySuspend(pVM, pVCpu);
2618 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2619 return rc;
2620
2621 /*
2622 * Terminate the VM.
2623 */
2624 case VINF_EM_TERMINATE:
2625 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2626 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2627 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2628 TMR3NotifySuspend(pVM, pVCpu);
2629 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2630 return rc;
2631
2632
2633 /*
2634 * Out of memory, suspend the VM and stuff.
2635 */
2636 case VINF_EM_NO_MEMORY:
2637 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2638 Assert(enmOldState != EMSTATE_SUSPENDED);
2639 pVCpu->em.s.enmPrevState = enmOldState;
2640 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2641 TMR3NotifySuspend(pVM, pVCpu);
2642 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2643
2644 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2645 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2646 if (rc != VINF_EM_SUSPEND)
2647 {
2648 if (RT_SUCCESS_NP(rc))
2649 {
2650 AssertLogRelMsgFailed(("%Rrc\n", rc));
2651 rc = VERR_EM_INTERNAL_ERROR;
2652 }
2653 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2654 }
2655 return rc;
2656
2657 /*
2658 * Guest debug events.
2659 */
2660 case VINF_EM_DBG_STEPPED:
2661 case VINF_EM_DBG_STOP:
2662 case VINF_EM_DBG_EVENT:
2663 case VINF_EM_DBG_BREAKPOINT:
2664 case VINF_EM_DBG_STEP:
2665 if (enmOldState == EMSTATE_RAW)
2666 {
2667 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2668 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2669 }
2670 else if (enmOldState == EMSTATE_HM)
2671 {
2672 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2673 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2674 }
2675 else if (enmOldState == EMSTATE_NEM)
2676 {
2677 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2678 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2679 }
2680 else if (enmOldState == EMSTATE_REM)
2681 {
2682 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2683 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2684 }
2685 else
2686 {
2687 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2688 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2689 }
2690 break;
2691
2692 /*
2693 * Hypervisor debug events.
2694 */
2695 case VINF_EM_DBG_HYPER_STEPPED:
2696 case VINF_EM_DBG_HYPER_BREAKPOINT:
2697 case VINF_EM_DBG_HYPER_ASSERTION:
2698 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2699 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2700 break;
2701
2702 /*
2703 * Triple fault.
2704 */
2705 case VINF_EM_TRIPLE_FAULT:
2706 if (!pVM->em.s.fGuruOnTripleFault)
2707 {
2708 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2709 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2710 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2711 continue;
2712 }
2713 /* Else fall through and trigger a guru. */
2714 RT_FALL_THRU();
2715
2716 case VERR_VMM_RING0_ASSERTION:
2717 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2718 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2719 break;
2720
2721 /*
2722 * Any error code showing up here other than the ones we
 2723 * know and process above is considered to be FATAL.
2724 *
2725 * Unknown warnings and informational status codes are also
2726 * included in this.
2727 */
2728 default:
2729 if (RT_SUCCESS_NP(rc))
2730 {
2731 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2732 rc = VERR_EM_INTERNAL_ERROR;
2733 }
2734 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2735 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2736 break;
2737 }
2738
2739 /*
2740 * Act on state transition.
2741 */
2742 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2743 if (enmOldState != enmNewState)
2744 {
2745 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2746
2747 /* Clear MWait flags and the unhalt FF. */
2748 if ( enmOldState == EMSTATE_HALTED
2749 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2750 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2751 && ( enmNewState == EMSTATE_RAW
2752 || enmNewState == EMSTATE_HM
2753 || enmNewState == EMSTATE_NEM
2754 || enmNewState == EMSTATE_REM
2755 || enmNewState == EMSTATE_IEM_THEN_REM
2756 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2757 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2758 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2759 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2760 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2761 {
2762 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2763 {
2764 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2765 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2766 }
2767 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2768 {
2769 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2770 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2771 }
2772 }
2773 }
2774 else
2775 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2776
2777 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2778 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2779
2780 /*
2781 * Act on the new state.
2782 */
2783 switch (enmNewState)
2784 {
2785 /*
2786 * Execute raw.
2787 */
2788 case EMSTATE_RAW:
2789 AssertLogRelMsgFailed(("%Rrc\n", rc));
2790 rc = VERR_EM_INTERNAL_ERROR;
2791 break;
2792
2793 /*
2794 * Execute hardware accelerated raw.
2795 */
2796 case EMSTATE_HM:
2797 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2798 break;
2799
2800 /*
 2801 * Execute using the native execution manager (NEM).
2802 */
2803 case EMSTATE_NEM:
2804 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2805 break;
2806
2807 /*
2808 * Execute recompiled.
2809 */
2810 case EMSTATE_REM:
2811 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2812 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2813 break;
2814
2815 /*
2816 * Execute in the interpreter.
2817 */
2818 case EMSTATE_IEM:
2819 {
2820 uint32_t cInstructions = 0;
2821#if 0 /* For testing purposes. */
2822 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2823 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2824 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2825 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2826 rc = VINF_SUCCESS;
2827 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2828#endif
2829 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 2047 /*cPollRate*/, &cInstructions));
2830 if (pVM->em.s.fIemExecutesAll)
2831 {
2832 Assert(rc != VINF_EM_RESCHEDULE_REM);
2833 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2834 Assert(rc != VINF_EM_RESCHEDULE_HM);
2835#ifdef VBOX_HIGH_RES_TIMERS_HACK
2836 if (cInstructions < 2048)
2837 TMTimerPollVoid(pVM, pVCpu);
2838#endif
2839 }
2840 fFFDone = false;
2841 break;
2842 }
2843
2844 /*
 2845 * Execute in IEM, hoping we can quickly switch back to HM
2846 * or RAW execution. If our hopes fail, we go to REM.
2847 */
2848 case EMSTATE_IEM_THEN_REM:
2849 {
2850 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2851 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2852 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2853 break;
2854 }
2855
2856 /*
2857 * Application processor execution halted until SIPI.
2858 */
2859 case EMSTATE_WAIT_SIPI:
2860 /* no break */
2861 /*
2862 * hlt - execution halted until interrupt.
2863 */
2864 case EMSTATE_HALTED:
2865 {
2866 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
 2867 /* If HM (or someone else) stores a pending interrupt in
 2868 TRPM, it must be dispatched ASAP without any halting.
 2869 Anything pending in TRPM has been accepted and the CPU
 2870 should already be in the right state to receive it. */
2871 if (TRPMHasTrap(pVCpu))
2872 rc = VINF_EM_RESCHEDULE;
2873 /* MWAIT has a special extension where it's woken up when
2874 an interrupt is pending even when IF=0. */
2875 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2876 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2877 {
2878 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2879 if (rc == VINF_SUCCESS)
2880 {
2881 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2882 APICUpdatePendingInterrupts(pVCpu);
2883
2884 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2885 | VMCPU_FF_INTERRUPT_NESTED_GUEST
2886 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2887 {
2888 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2889 rc = VINF_EM_RESCHEDULE;
2890 }
2891 }
2892 }
2893 else
2894 {
2895 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2896 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
2897 check VMCPU_FF_UPDATE_APIC here. */
2898 if ( rc == VINF_SUCCESS
2899 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2900 {
2901 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2902 rc = VINF_EM_RESCHEDULE;
2903 }
2904 }
2905
2906 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2907 break;
2908 }
2909
2910 /*
2911 * Suspended - return to VM.cpp.
2912 */
2913 case EMSTATE_SUSPENDED:
2914 TMR3NotifySuspend(pVM, pVCpu);
2915 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2916 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2917 return VINF_EM_SUSPEND;
2918
2919 /*
2920 * Debugging in the guest.
2921 */
2922 case EMSTATE_DEBUG_GUEST_RAW:
2923 case EMSTATE_DEBUG_GUEST_HM:
2924 case EMSTATE_DEBUG_GUEST_NEM:
2925 case EMSTATE_DEBUG_GUEST_IEM:
2926 case EMSTATE_DEBUG_GUEST_REM:
2927 TMR3NotifySuspend(pVM, pVCpu);
2928 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2929 TMR3NotifyResume(pVM, pVCpu);
2930 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2931 break;
2932
2933 /*
2934 * Debugging in the hypervisor.
2935 */
2936 case EMSTATE_DEBUG_HYPER:
2937 {
2938 TMR3NotifySuspend(pVM, pVCpu);
2939 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2940
2941 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2942 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2943 if (rc != VINF_SUCCESS)
2944 {
2945 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2946 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2947 else
2948 {
2949 /* switch to guru meditation mode */
2950 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2951 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2952 VMMR3FatalDump(pVM, pVCpu, rc);
2953 }
2954 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2955 return rc;
2956 }
2957
2958 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2959 TMR3NotifyResume(pVM, pVCpu);
2960 break;
2961 }
2962
2963 /*
2964 * Guru meditation takes place in the debugger.
2965 */
2966 case EMSTATE_GURU_MEDITATION:
2967 {
2968 TMR3NotifySuspend(pVM, pVCpu);
2969 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2970 VMMR3FatalDump(pVM, pVCpu, rc);
2971 emR3Debug(pVM, pVCpu, rc);
2972 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2973 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2974 return rc;
2975 }
2976
2977 /*
2978 * The states we don't expect here.
2979 */
2980 case EMSTATE_NONE:
2981 case EMSTATE_TERMINATING:
2982 default:
2983 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2984 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2985 TMR3NotifySuspend(pVM, pVCpu);
2986 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2987 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2988 return VERR_EM_INTERNAL_ERROR;
2989 }
2990 } /* The Outer Main Loop */
2991 }
2992 else
2993 {
2994 /*
2995 * Fatal error.
2996 */
2997 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2998 TMR3NotifySuspend(pVM, pVCpu);
2999 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
3000 VMMR3FatalDump(pVM, pVCpu, rc);
3001 emR3Debug(pVM, pVCpu, rc);
3002 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3003 /** @todo change the VM state! */
3004 return rc;
3005 }
3006
3007 /* not reached */
3008}
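/*
 * Illustrative sketch, not part of the original file: once the VM has been
 * constructed, the per-VCPU emulation thread essentially hands control to
 * EMR3ExecuteVM() and acts on whatever it returns.  The wrapper below is a
 * made-up placeholder; the real caller is the EMT loop elsewhere in VMM.
 */
#if 0 /* sketch only */
static int emR3SketchEmtRun(PVM pVM, PVMCPU pVCpu)
{
    int rc = EMR3ExecuteVM(pVM, pVCpu); /* returns on power-off, suspend, termination or fatal error */
    LogRel(("EMT%u: EMR3ExecuteVM returned %Rrc\n", pVCpu->idCpu, rc));
    return rc;
}
#endif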
3009