VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@ 99051

Last change on this file since 99051 was 99051, checked in by vboxsync, 2 years ago

VMM: More ARMv8 x86/amd64 separation work, VBoxVMMArm compiles and links now, bugref:10385

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 114.4 KB
 
1/* $Id: EM.cpp 99051 2023-03-19 16:40:06Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28/** @page pg_em EM - The Execution Monitor / Manager
29 *
30 * The Execution Monitor/Manager is responsible for running the VM, scheduling
31 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
32 * Interpreted), and keeping the CPU states in sync. The function
33 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
34 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
35 * emR3RemExecute).
36 *
37 * The interpreted execution is only used to avoid switching between
38 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
39 * The interpretation is thus implemented as part of EM.
40 *
41 * @see grp_em
42 */
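/*
 * Illustrative sketch only (assumed shape, not copied from the sources): the
 * relationship between the outer loop and the per-mode inner loops mentioned
 * above is roughly the following, with emR3Reschedule() deciding which state
 * to enter next:
 *
 *     // inside EMR3ExecuteVM(), heavily simplified
 *     for (;;)
 *     {
 *         switch (pVCpu->em.s.enmState)
 *         {
 *             case EMSTATE_HM:  rc = emR3HmExecute(pVM, pVCpu, &fFFDone); break;
 *             case EMSTATE_NEM: rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone)); break;
 *             case EMSTATE_REM: rc = emR3RemExecute(pVM, pVCpu, &fFFDone); break;
 *             // ... halted, suspended, debug and guru states ...
 *         }
 *         // process the status code, run forced actions, reschedule
 *     }
 */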
43
44
45/*********************************************************************************************************************************
46* Header Files *
47*********************************************************************************************************************************/
48#define LOG_GROUP LOG_GROUP_EM
49#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET & interrupt injection */
50#include <VBox/vmm/em.h>
51#include <VBox/vmm/vmm.h>
52#include <VBox/vmm/selm.h>
53#include <VBox/vmm/trpm.h>
54#include <VBox/vmm/iem.h>
55#include <VBox/vmm/nem.h>
56#include <VBox/vmm/iom.h>
57#include <VBox/vmm/dbgf.h>
58#include <VBox/vmm/pgm.h>
59#include <VBox/vmm/apic.h>
60#include <VBox/vmm/tm.h>
61#include <VBox/vmm/mm.h>
62#include <VBox/vmm/ssm.h>
63#include <VBox/vmm/pdmapi.h>
64#include <VBox/vmm/pdmcritsect.h>
65#include <VBox/vmm/pdmqueue.h>
66#include <VBox/vmm/hm.h>
67#include "EMInternal.h"
68#include <VBox/vmm/vm.h>
69#include <VBox/vmm/uvm.h>
70#include <VBox/vmm/cpumdis.h>
71#include <VBox/dis.h>
72#include <VBox/disopcode.h>
73#include <VBox/err.h>
74#include "VMMTracing.h"
75
76#include <iprt/asm.h>
77#include <iprt/string.h>
78#include <iprt/stream.h>
79#include <iprt/thread.h>
80
81
82/*********************************************************************************************************************************
83* Internal Functions *
84*********************************************************************************************************************************/
85static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
86static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
87#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
88static const char *emR3GetStateName(EMSTATE enmState);
89#endif
90static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
91#if defined(VBOX_WITH_REM) || defined(DEBUG)
92static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
93#endif
94static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
95
96
97/**
98 * Initializes the EM.
99 *
100 * @returns VBox status code.
101 * @param pVM The cross context VM structure.
102 */
103VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
104{
105 LogFlow(("EMR3Init\n"));
106 /*
107 * Assert alignment and sizes.
108 */
109 AssertCompileMemberAlignment(VM, em.s, 32);
110 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
111 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s.u.FatalLongJump) <= RT_SIZEOFMEMB(VMCPU, em.s.u.achPaddingFatalLongJump));
112 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s) <= RT_SIZEOFMEMB(VMCPU, em.padding));
113
114 /*
115 * Init the structure.
116 */
117 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
118 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
119
120 int rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll,
121#if defined(RT_ARCH_ARM64) && defined(RT_OS_DARWIN) && !defined(VBOX_VMM_TARGET_ARMV8)
122 true
123#else
124 false
125#endif
126 );
127 AssertLogRelRCReturn(rc, rc);
128
129 bool fEnabled;
130 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
131 AssertLogRelRCReturn(rc, rc);
132 pVM->em.s.fGuruOnTripleFault = !fEnabled;
133 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
134 {
135 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
136 pVM->em.s.fGuruOnTripleFault = true;
137 }
138
139 LogRel(("EMR3Init: fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n", pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
140
141 /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
142 * Whether to try correlate exit history in any context, detect hot spots and
143 * try optimize these using IEM if there are other exits close by. This
144 * overrides the context specific settings. */
145 bool fExitOptimizationEnabled = true;
146 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
147 AssertLogRelRCReturn(rc, rc);
148
149 /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
150 * Whether to optimize exits in ring-0. Setting this to false will also disable
151 * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting. Depending on preemption
152 * capabilities of the host kernel, this optimization may be unavailable. */
153 bool fExitOptimizationEnabledR0 = true;
154 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
155 AssertLogRelRCReturn(rc, rc);
156 fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
157
158 /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
159 * Whether to optimize exits in ring-0 when preemption is disabled (or preemption
160 * hooks are in effect). */
161 /** @todo change the default to true here */
162 bool fExitOptimizationEnabledR0PreemptDisabled = true;
163 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
164 AssertLogRelRCReturn(rc, rc);
165 fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
166
167 /** @cfgm{/EM/HistoryExecMaxInstructions, integer, 16, 65535, 8192}
168 * Maximum number of instructions to let EMHistoryExec execute in one go. */
169 uint16_t cHistoryExecMaxInstructions = 8192;
170 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryExecMaxInstructions", &cHistoryExecMaxInstructions, cHistoryExecMaxInstructions);
171 AssertLogRelRCReturn(rc, rc);
172 if (cHistoryExecMaxInstructions < 16)
173 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS, "/EM/HistoryExecMaxInstructions value is too small, min 16");
174
175 /** @cfgm{/EM/HistoryProbeMaxInstructionsWithoutExit, integer, 2, 65535, 24 for HM, 32 for NEM}
176 * Maximum number of instructions between exits during probing. */
177 uint16_t cHistoryProbeMaxInstructionsWithoutExit = 24;
178#ifdef RT_OS_WINDOWS
179 if (VM_IS_NEM_ENABLED(pVM))
180 cHistoryProbeMaxInstructionsWithoutExit = 32;
181#endif
182 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbeMaxInstructionsWithoutExit", &cHistoryProbeMaxInstructionsWithoutExit,
183 cHistoryProbeMaxInstructionsWithoutExit);
184 AssertLogRelRCReturn(rc, rc);
185 if (cHistoryProbeMaxInstructionsWithoutExit < 2)
186 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
187 "/EM/HistoryProbeMaxInstructionsWithoutExit value is too small, min 2");
188
189 /** @cfgm{/EM/HistoryProbMinInstructions, integer, 0, 65535, depends}
190 * The default is (/EM/HistoryProbeMaxInstructionsWithoutExit + 1) * 3. */
191 uint16_t cHistoryProbeMinInstructions = cHistoryProbeMaxInstructionsWithoutExit < 0x5554
192 ? (cHistoryProbeMaxInstructionsWithoutExit + 1) * 3 : 0xffff;
193 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbMinInstructions", &cHistoryProbeMinInstructions,
194 cHistoryProbeMinInstructions);
195 AssertLogRelRCReturn(rc, rc);
196
197 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
198 {
199 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
200 pVCpu->em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
201 pVCpu->em.s.fExitOptimizationEnabledR0 = fExitOptimizationEnabledR0;
202 pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
203 pVCpu->em.s.cHistoryExecMaxInstructions = cHistoryExecMaxInstructions;
204 pVCpu->em.s.cHistoryProbeMinInstructions = cHistoryProbeMinInstructions;
205 pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
206 }
207
208 /*
209 * Saved state.
210 */
211 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
212 NULL, NULL, NULL,
213 NULL, emR3Save, NULL,
214 NULL, emR3Load, NULL);
215 if (RT_FAILURE(rc))
216 return rc;
217
218 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
219 {
220 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
221
222 pVCpu->em.s.enmState = idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
223 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
224 pVCpu->em.s.u64TimeSliceStart = 0; /* paranoia */
225 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
226
227# define EM_REG_COUNTER(a, b, c) \
228 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, idCpu); \
229 AssertRC(rc);
230
231# define EM_REG_COUNTER_USED(a, b, c) \
232 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, idCpu); \
233 AssertRC(rc);
234
235# define EM_REG_PROFILE(a, b, c) \
236 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
237 AssertRC(rc);
238
239# define EM_REG_PROFILE_ADV(a, b, c) \
240 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
241 AssertRC(rc);
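/* For illustration (the StatFoo member below is made up for this example): a
 * registration such as
 *     EM_REG_COUNTER(&pVCpu->em.s.StatFoo, "/EM/CPU%u/Foo", "Example counter.");
 * expands to a STAMR3RegisterF() call where idCpu is substituted for the '%u'
 * in the sample name, giving each virtual CPU its own statistics node. */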
242
243 /*
244 * Statistics.
245 */
246#ifdef VBOX_WITH_STATISTICS
247 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoRestarted, "/EM/CPU%u/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
248 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoIem, "/EM/CPU%u/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
249
250 /* these should be considered for release statistics. */
251 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%u/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
252 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%u/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
253 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%u/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
254#endif
255 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%u/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
256 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%u/EM/HMExecuteCalled", "Number of times emR3HmExecute is called.");
257#ifdef VBOX_WITH_STATISTICS
258 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%u/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
259 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%u/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
260 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%u/EM/NEMEnter", "Profiling NEM entry overhead.");
261#endif
262 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%u/EM/NEMExec", "Profiling NEM execution.");
263 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%u/EM/NEMExecuteCalled", "Number of times emR3NemExecute is called.");
264#ifdef VBOX_WITH_STATISTICS
265 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%u/EM/REMEmuSingle", "Profiling single instruction REM execution.");
266 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%u/EM/REMExec", "Profiling REM execution.");
267 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%u/EM/REMSync", "Profiling REM context syncing.");
268 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%u/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
269 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%u/EM/RAWExec", "Profiling Raw Mode execution.");
270 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%u/EM/RAWTail", "Profiling Raw Mode tail overhead.");
271#endif /* VBOX_WITH_STATISTICS */
272
273 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%u/EM/ForcedActions", "Profiling forced action execution.");
274 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%u/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
275 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%u/EM/Capped", "Profiling capped state (sleep).");
276 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%u/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
277 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%u/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
278
279 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%u/EM/Total", "Profiling EMR3ExecuteVM.");
280
281 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
282 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", idCpu);
283 AssertRC(rc);
284
285 /* History record statistics */
286 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
287 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", idCpu);
288 AssertRC(rc);
289
290 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
291 {
292 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
293 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", idCpu, iStep);
294 AssertRC(rc);
295 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
296 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", idCpu, iStep);
297 AssertRC(rc);
298 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecReplaced[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
299 "Number of replacements at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacments", idCpu, iStep);
300 AssertRC(rc);
301 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
302 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", idCpu, iStep);
303 AssertRC(rc);
304 }
305
306 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%u/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
307 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%u/ExitOpt/ExecSavedExit", "Net number of saved exits.");
308 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%u/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
309 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%u/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
310 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%u/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
311 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%u/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
312 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%u/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
313 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%u/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
314 }
315
316 emR3InitDbg(pVM);
317 return VINF_SUCCESS;
318}
319
320
321/**
322 * Called when a VM initialization stage is completed.
323 *
324 * @returns VBox status code.
325 * @param pVM The cross context VM structure.
326 * @param enmWhat The initialization state that was completed.
327 */
328VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
329{
330 if (enmWhat == VMINITCOMPLETED_RING0)
331 LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
332 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabled, pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0,
333 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0PreemptDisabled));
334 return VINF_SUCCESS;
335}
336
337
338/**
339 * Applies relocations to data and code managed by this
340 * component. This function will be called at init and
341 * whenever the VMM needs to relocate itself inside the GC.
342 *
343 * @param pVM The cross context VM structure.
344 */
345VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
346{
347 LogFlow(("EMR3Relocate\n"));
348 RT_NOREF(pVM);
349}
350
351
352/**
353 * Reset the EM state for a CPU.
354 *
355 * Called by EMR3Reset and hot plugging.
356 *
357 * @param pVCpu The cross context virtual CPU structure.
358 */
359VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
360{
361 /* Reset scheduling state. */
362 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
363
364 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
365 out of the HALTED state here so that enmPrevState doesn't end up as
366 HALTED when EMR3Execute returns. */
367 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
368 {
369 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
370 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
371 }
372}
373
374
375/**
376 * Reset notification.
377 *
378 * @param pVM The cross context VM structure.
379 */
380VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
381{
382 Log(("EMR3Reset: \n"));
383 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
384 EMR3ResetCpu(pVM->apCpusR3[idCpu]);
385}
386
387
388/**
389 * Terminates the EM.
390 *
391 * Termination means cleaning up and freeing all resources; the VM itself
392 * is at this point powered off or suspended.
393 *
394 * @returns VBox status code.
395 * @param pVM The cross context VM structure.
396 */
397VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
398{
399 RT_NOREF(pVM);
400 return VINF_SUCCESS;
401}
402
403
404/**
405 * Execute state save operation.
406 *
407 * @returns VBox status code.
408 * @param pVM The cross context VM structure.
409 * @param pSSM SSM operation handle.
410 */
411static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
412{
413 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
414 {
415 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
416
417 SSMR3PutBool(pSSM, false /*fForceRAW*/);
418
419 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
420 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
421 SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
422
423 /* Save mwait state. */
424 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
425 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
426 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
427 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
428 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
429 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
430 AssertRCReturn(rc, rc);
431 }
432 return VINF_SUCCESS;
433}
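/* For reference, derived from the code above rather than a formal format spec:
 * each per-CPU block in the "em" saved-state unit is written as
 *     bool      fForceRAW          (always false these days)
 *     uint32_t  enmPrevState
 *     uint32_t  MWait.fWait
 *     GCPTR     MWait.uMWaitRAX, uMWaitRCX, uMonitorRAX, uMonitorRCX, uMonitorRDX
 * and emR3Load() below consumes it in the same order. */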
434
435
436/**
437 * Execute state load operation.
438 *
439 * @returns VBox status code.
440 * @param pVM The cross context VM structure.
441 * @param pSSM SSM operation handle.
442 * @param uVersion Data layout version.
443 * @param uPass The data pass.
444 */
445static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
446{
447 /*
448 * Validate version.
449 */
450 if ( uVersion > EM_SAVED_STATE_VERSION
451 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
452 {
453 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
454 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
455 }
456 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
457
458 /*
459 * Load the saved state.
460 */
461 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
462 {
463 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
464
465 bool fForceRAWIgnored;
466 int rc = SSMR3GetBool(pSSM, &fForceRAWIgnored);
467 AssertRCReturn(rc, rc);
468
469 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
470 {
471 SSM_GET_ENUM32_RET(pSSM, pVCpu->em.s.enmPrevState, EMSTATE);
472 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
473
474 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
475 }
476 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
477 {
478 /* Load mwait state. */
479 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
480 AssertRCReturn(rc, rc);
481 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
482 AssertRCReturn(rc, rc);
483 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
484 AssertRCReturn(rc, rc);
485 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
486 AssertRCReturn(rc, rc);
487 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
488 AssertRCReturn(rc, rc);
489 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
490 AssertRCReturn(rc, rc);
491 }
492 }
493 return VINF_SUCCESS;
494}
495
496
497/**
498 * Argument packet for emR3SetExecutionPolicy.
499 */
500struct EMR3SETEXECPOLICYARGS
501{
502 EMEXECPOLICY enmPolicy;
503 bool fEnforce;
504};
505
506
507/**
508 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
509 */
510static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
511{
512 /*
513 * Only the first CPU changes the variables.
514 */
515 if (pVCpu->idCpu == 0)
516 {
517 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
518 switch (pArgs->enmPolicy)
519 {
520 case EMEXECPOLICY_RECOMPILE_RING0:
521 case EMEXECPOLICY_RECOMPILE_RING3:
522 break;
523 case EMEXECPOLICY_IEM_ALL:
524 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
525
526 /* For making '.alliem 1' useful during debugging, transition the
527 EMSTATE_DEBUG_GUEST_XXX to EMSTATE_DEBUG_GUEST_IEM. */
528 for (VMCPUID i = 0; i < pVM->cCpus; i++)
529 {
530 PVMCPU pVCpuX = pVM->apCpusR3[i];
531 switch (pVCpuX->em.s.enmState)
532 {
533 case EMSTATE_DEBUG_GUEST_RAW:
534 case EMSTATE_DEBUG_GUEST_HM:
535 case EMSTATE_DEBUG_GUEST_NEM:
536 case EMSTATE_DEBUG_GUEST_REM:
537 Log(("EM: idCpu=%u: %s -> EMSTATE_DEBUG_GUEST_IEM\n", i, emR3GetStateName(pVCpuX->em.s.enmState) ));
538 pVCpuX->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
539 break;
540 case EMSTATE_DEBUG_GUEST_IEM:
541 default:
542 break;
543 }
544 }
545 break;
546 default:
547 AssertFailedReturn(VERR_INVALID_PARAMETER);
548 }
549 Log(("EM: Set execution policy (fIemExecutesAll=%RTbool)\n", pVM->em.s.fIemExecutesAll));
550 }
551
552 /*
553 * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
554 */
555 return pVCpu->em.s.enmState == EMSTATE_RAW
556 || pVCpu->em.s.enmState == EMSTATE_HM
557 || pVCpu->em.s.enmState == EMSTATE_NEM
558 || pVCpu->em.s.enmState == EMSTATE_IEM
559 || pVCpu->em.s.enmState == EMSTATE_REM
560 || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
561 ? VINF_EM_RESCHEDULE
562 : VINF_SUCCESS;
563}
564
565
566/**
567 * Changes an execution scheduling policy parameter.
568 *
569 * This is used to enable or disable raw-mode / hardware-virtualization
570 * execution of user and supervisor code.
571 *
572 * @returns VINF_SUCCESS on success.
573 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
574 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
575 *
576 * @param pUVM The user mode VM handle.
577 * @param enmPolicy The scheduling policy to change.
578 * @param fEnforce Whether to enforce the policy or not.
579 */
580VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
581{
582 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
583 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
584 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
585
586 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
587 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
588}
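/* Usage sketch (assumed caller, not from the original sources): this is the API
 * behind things like the '.alliem' debugger command mentioned above. A caller
 * holding a user mode VM handle could force all guest execution through IEM with:
 *
 *     int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
 *     AssertLogRelRC(rc);   // may also be VINF_EM_RESCHEDULE, see the callback above
 */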
589
590
591/**
592 * Queries an execution scheduling policy parameter.
593 *
594 * @returns VBox status code
595 * @param pUVM The user mode VM handle.
596 * @param enmPolicy The scheduling policy to query.
597 * @param pfEnforced Where to return the current value.
598 */
599VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
600{
601 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
602 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
603 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
604 PVM pVM = pUVM->pVM;
605 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
606
607 /* No need to bother EMTs with a query. */
608 switch (enmPolicy)
609 {
610 case EMEXECPOLICY_RECOMPILE_RING0:
611 case EMEXECPOLICY_RECOMPILE_RING3:
612 *pfEnforced = false;
613 break;
614 case EMEXECPOLICY_IEM_ALL:
615 *pfEnforced = pVM->em.s.fIemExecutesAll;
616 break;
617 default:
618 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
619 }
620
621 return VINF_SUCCESS;
622}
623
624
625/**
626 * Queries the main execution engine of the VM.
627 *
628 * @returns VBox status code
629 * @param pUVM The user mode VM handle.
630 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
631 */
632VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
633{
634 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
635 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
636
637 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
638 PVM pVM = pUVM->pVM;
639 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
640
641 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
642 return VINF_SUCCESS;
643}
644
645
646/**
647 * Raise a fatal error.
648 *
649 * Safely terminate the VM with full state report and stuff. This function
650 * will naturally never return.
651 *
652 * @param pVCpu The cross context virtual CPU structure.
653 * @param rc VBox status code.
654 */
655VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
656{
657 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
658 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
659}
660
661
662#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
663/**
664 * Gets the EM state name.
665 *
666 * @returns Pointer to the read-only state name.
667 * @param enmState The state.
668 */
669static const char *emR3GetStateName(EMSTATE enmState)
670{
671 switch (enmState)
672 {
673 case EMSTATE_NONE: return "EMSTATE_NONE";
674 case EMSTATE_RAW: return "EMSTATE_RAW";
675 case EMSTATE_HM: return "EMSTATE_HM";
676 case EMSTATE_IEM: return "EMSTATE_IEM";
677 case EMSTATE_REM: return "EMSTATE_REM";
678 case EMSTATE_HALTED: return "EMSTATE_HALTED";
679 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
680 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
681 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
682 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
683 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
684 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
685 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
686 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
687 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
688 case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
689 case EMSTATE_NEM: return "EMSTATE_NEM";
690 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
691 default: return "Unknown!";
692 }
693}
694#endif /* LOG_ENABLED || VBOX_STRICT */
695
696
697#if !defined(VBOX_VMM_TARGET_ARMV8)
698/**
699 * Handle pending ring-3 I/O port write.
700 *
701 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
702 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
703 *
704 * @returns Strict VBox status code.
705 * @param pVM The cross context VM structure.
706 * @param pVCpu The cross context virtual CPU structure.
707 */
708VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
709{
710 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
711
712 /* Get and clear the pending data. */
713 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
714 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
715 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
716 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
717 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
718
719 /* Assert sanity. */
720 switch (cbValue)
721 {
722 case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
723 case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
724 case 4: break;
725 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
726 }
727 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
728
729 /* Do the work.*/
730 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
731 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
732 if (IOM_SUCCESS(rcStrict))
733 {
734 pVCpu->cpum.GstCtx.rip += cbInstr;
735 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
736 }
737 return rcStrict;
738}
739
740
741/**
742 * Handle pending ring-3 I/O port read.
743 *
744 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
745 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
746 *
747 * @returns Strict VBox status code.
748 * @param pVM The cross context VM structure.
749 * @param pVCpu The cross context virtual CPU structure.
750 */
751VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
752{
753 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
754
755 /* Get and clear the pending data. */
756 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
757 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
758 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
759 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
760
761 /* Assert sanity. */
762 switch (cbValue)
763 {
764 case 1: break;
765 case 2: break;
766 case 4: break;
767 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
768 }
769 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
770 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
771
772 /* Do the work.*/
773 uint32_t uValue = 0;
774 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
775 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
776 if (IOM_SUCCESS(rcStrict))
777 {
778 if (cbValue == 4)
779 pVCpu->cpum.GstCtx.rax = uValue;
780 else if (cbValue == 2)
781 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
782 else
783 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
784 pVCpu->cpum.GstCtx.rip += cbInstr;
785 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
786 }
787 return rcStrict;
788}
789
790
791/**
792 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
793 * Worker for emR3ExecuteSplitLockInstruction}
794 */
795static DECLCALLBACK(VBOXSTRICTRC) emR3ExecuteSplitLockInstructionRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
796{
797 /* Only execute on the specified EMT. */
798 if (pVCpu == (PVMCPU)pvUser)
799 {
800 LogFunc(("\n"));
801 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
802 LogFunc(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
803 if (rcStrict == VINF_IEM_RAISED_XCPT)
804 rcStrict = VINF_SUCCESS;
805 return rcStrict;
806 }
807 RT_NOREF(pVM);
808 return VINF_SUCCESS;
809}
810
811
812/**
813 * Handle an instruction causing a split cacheline lock access in SMP VMs.
814 *
815 * Generally we only get here if the host has split-lock detection enabled and
816 * this caused an \#AC because of something the guest did. If we interpret the
817 * instruction as-is, we'll likely just repeat the split-lock access and
818 * possibly be killed, get a SIGBUS, or trigger a warning followed by extra MSR
819 * changes on context switching (costs a tiny bit). Assuming these \#ACs are
820 * rare to non-existent, we'll do a rendezvous of all EMTs and tell IEM to
821 * disregard the lock prefix when emulating the instruction.
822 *
823 * Yes, we could probably modify the MSR (or MSRs) controlling the detection
824 * feature when entering guest context, but the support for the feature isn't a
825 * 100% given and we'll need the debug-only supdrvOSMsrProberRead and
826 * supdrvOSMsrProberWrite functionality from SUPDrv.cpp to safely detect it.
827 * Thus the approach is to just deal with the spurious \#ACs first and maybe add
828 * proper detection to SUPDrv later if we find it necessary.
829 *
830 * @see @bugref{10052}
831 *
832 * @returns Strict VBox status code.
833 * @param pVM The cross context VM structure.
834 * @param pVCpu The cross context virtual CPU structure.
835 */
836VBOXSTRICTRC emR3ExecuteSplitLockInstruction(PVM pVM, PVMCPU pVCpu)
837{
838 LogFunc(("\n"));
839 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, emR3ExecuteSplitLockInstructionRendezvous, pVCpu);
840}
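/* Note (assumption, not something stated in this file): ring-0 reports the
 * split-lock \#AC with a dedicated status code, VINF_EM_EMULATE_SPLIT_LOCK,
 * which the main loop in EMR3ExecuteVM() is expected to route here roughly as:
 *
 *     case VINF_EM_EMULATE_SPLIT_LOCK:
 *         rcStrict = emR3ExecuteSplitLockInstruction(pVM, pVCpu);
 *         break;
 */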
841#endif /* !VBOX_VMM_TARGET_ARMV8 */
842
843
844/**
845 * Debug loop.
846 *
847 * @returns VBox status code for EM.
848 * @param pVM The cross context VM structure.
849 * @param pVCpu The cross context virtual CPU structure.
850 * @param rc Current EM VBox status code.
851 */
852static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
853{
854 for (;;)
855 {
856 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
857 const VBOXSTRICTRC rcLast = rc;
858
859 /*
860 * Debug related RC.
861 */
862 switch (VBOXSTRICTRC_VAL(rc))
863 {
864 /*
865 * Single step an instruction.
866 */
867 case VINF_EM_DBG_STEP:
868 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
869 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
870 AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
871#if !defined(VBOX_VMM_TARGET_ARMV8)
872 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
873 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
874#endif
875 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
876 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
877#ifdef VBOX_WITH_REM /** @todo fix me? */
878 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
879 rc = emR3RemStep(pVM, pVCpu);
880#endif
881 else
882 {
883 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
884 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
885 rc = VINF_EM_DBG_STEPPED;
886 }
887 break;
888
889 /*
890 * Simple events: stepped, breakpoint, stop/assertion.
891 */
892 case VINF_EM_DBG_STEPPED:
893 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
894 break;
895
896 case VINF_EM_DBG_BREAKPOINT:
897 rc = DBGFR3BpHit(pVM, pVCpu);
898 break;
899
900 case VINF_EM_DBG_STOP:
901 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
902 break;
903
904 case VINF_EM_DBG_EVENT:
905 rc = DBGFR3EventHandlePending(pVM, pVCpu);
906 break;
907
908 case VINF_EM_DBG_HYPER_STEPPED:
909 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
910 break;
911
912 case VINF_EM_DBG_HYPER_BREAKPOINT:
913 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
914 break;
915
916 case VINF_EM_DBG_HYPER_ASSERTION:
917 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
918 RTLogFlush(NULL);
919 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
920 break;
921
922 /*
923 * Guru meditation.
924 */
925 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
926 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
927 break;
928 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
929 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
930 break;
931 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
932 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
933 break;
934
935 default: /** @todo don't use default for guru, but make special errors code! */
936 {
937 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
938 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
939 break;
940 }
941 }
942
943 /*
944 * Process the result.
945 */
946 switch (VBOXSTRICTRC_VAL(rc))
947 {
948 /*
949 * Continue the debugging loop.
950 */
951 case VINF_EM_DBG_STEP:
952 case VINF_EM_DBG_STOP:
953 case VINF_EM_DBG_EVENT:
954 case VINF_EM_DBG_STEPPED:
955 case VINF_EM_DBG_BREAKPOINT:
956 case VINF_EM_DBG_HYPER_STEPPED:
957 case VINF_EM_DBG_HYPER_BREAKPOINT:
958 case VINF_EM_DBG_HYPER_ASSERTION:
959 break;
960
961 /*
962 * Resuming execution (in some form) has to be done here if we got
963 * a hypervisor debug event.
964 */
965 case VINF_SUCCESS:
966 case VINF_EM_RESUME:
967 case VINF_EM_SUSPEND:
968 case VINF_EM_RESCHEDULE:
969 case VINF_EM_RESCHEDULE_RAW:
970 case VINF_EM_RESCHEDULE_REM:
971 case VINF_EM_HALT:
972 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
973 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
974 if (rc == VINF_SUCCESS)
975 rc = VINF_EM_RESCHEDULE;
976 return rc;
977
978 /*
979 * The debugger isn't attached.
980 * We'll simply turn the thing off since that's the easiest thing to do.
981 */
982 case VERR_DBGF_NOT_ATTACHED:
983 switch (VBOXSTRICTRC_VAL(rcLast))
984 {
985 case VINF_EM_DBG_HYPER_STEPPED:
986 case VINF_EM_DBG_HYPER_BREAKPOINT:
987 case VINF_EM_DBG_HYPER_ASSERTION:
988 case VERR_TRPM_PANIC:
989 case VERR_TRPM_DONT_PANIC:
990 case VERR_VMM_RING0_ASSERTION:
991 case VERR_VMM_HYPER_CR3_MISMATCH:
992 case VERR_VMM_RING3_CALL_DISABLED:
993 return rcLast;
994 }
995 return VINF_EM_OFF;
996
997 /*
998 * Status codes terminating the VM in one or another sense.
999 */
1000 case VINF_EM_TERMINATE:
1001 case VINF_EM_OFF:
1002 case VINF_EM_RESET:
1003 case VINF_EM_NO_MEMORY:
1004 case VINF_EM_RAW_STALE_SELECTOR:
1005 case VINF_EM_RAW_IRET_TRAP:
1006 case VERR_TRPM_PANIC:
1007 case VERR_TRPM_DONT_PANIC:
1008 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
1009 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
1010 case VERR_VMM_RING0_ASSERTION:
1011 case VERR_VMM_HYPER_CR3_MISMATCH:
1012 case VERR_VMM_RING3_CALL_DISABLED:
1013 case VERR_INTERNAL_ERROR:
1014 case VERR_INTERNAL_ERROR_2:
1015 case VERR_INTERNAL_ERROR_3:
1016 case VERR_INTERNAL_ERROR_4:
1017 case VERR_INTERNAL_ERROR_5:
1018 case VERR_IPE_UNEXPECTED_STATUS:
1019 case VERR_IPE_UNEXPECTED_INFO_STATUS:
1020 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
1021 return rc;
1022
1023 /*
1024 * The rest is unexpected, and will keep us here.
1025 */
1026 default:
1027 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
1028 break;
1029 }
1030 } /* debug for ever */
1031}
1032
1033
1034#if defined(VBOX_WITH_REM) || defined(DEBUG)
1035/**
1036 * Steps recompiled code.
1037 *
1038 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
1039 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1040 *
1041 * @param pVM The cross context VM structure.
1042 * @param pVCpu The cross context virtual CPU structure.
1043 */
1044static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
1045{
1046#if defined(VBOX_VMM_TARGET_ARMV8)
1047 Log3(("emR3RemStep: pc=%08x\n", CPUMGetGuestFlatPC(pVCpu)));
1048#else
1049 Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1050#endif
1051
1052 int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
1053
1054#if defined(VBOX_VMM_TARGET_ARMV8)
1055 Log3(("emR3RemStep: pc=%08x\n", CPUMGetGuestFlatPC(pVCpu)));
1056#else
1057 Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1058#endif
1059 return rc;
1060}
1061#endif /* VBOX_WITH_REM || DEBUG */
1062
1063
1064/**
1065 * Executes recompiled code.
1066 *
1067 * This function contains the recompiler version of the inner
1068 * execution loop (the outer loop being in EMR3ExecuteVM()).
1069 *
1070 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1071 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1072 *
1073 * @param pVM The cross context VM structure.
1074 * @param pVCpu The cross context virtual CPU structure.
1075 * @param pfFFDone Where to store an indicator telling whether or not
1076 * FFs were done before returning.
1077 *
1078 */
1079static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1080{
1081#ifdef LOG_ENABLED
1082# if defined(VBOX_VMM_TARGET_ARMV8)
1083 Log3(("EM: pc=%08x\n", CPUMGetGuestFlatPC(pVCpu)));
1084# else
1085 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1086
1087 if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
1088 Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
1089 else
1090 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
1091# endif
1092#endif
1093 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1094
1095 /*
1096 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1097 * or the REM suggests raw-mode execution.
1098 */
1099 *pfFFDone = false;
1100 uint32_t cLoops = 0;
1101 int rc = VINF_SUCCESS;
1102 for (;;)
1103 {
1104 /*
1105 * Execute REM.
1106 */
1107 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1108 {
1109 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1110 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 8192 /*cMaxInstructions*/, 4095 /*cPollRate*/, NULL /*pcInstructions*/));
1111 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1112 }
1113 else
1114 {
1115 /* Give up this time slice; virtual time continues */
1116 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1117 RTThreadSleep(5);
1118 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1119 rc = VINF_SUCCESS;
1120 }
1121
1122 /*
1123 * Deal with high priority post execution FFs before doing anything
1124 * else. Sync back the state and leave the lock to be on the safe side.
1125 */
1126 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1127 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1128 rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
1129
1130 /*
1131 * Process the returned status code.
1132 */
1133 if (rc != VINF_SUCCESS)
1134 {
1135 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1136 break;
1137 if (rc != VINF_REM_INTERRUPED_FF)
1138 {
1139 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1140 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1141 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1142 {
1143 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1144 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1145 {
1146 rc = VINF_EM_RESCHEDULE;
1147 break;
1148 }
1149 }
1150
1151 /*
1152 * Anything which is not known to us means an internal error
1153 * and the termination of the VM!
1154 */
1155 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1156 break;
1157 }
1158 }
1159
1160
1161 /*
1162 * Check and execute forced actions.
1163 *
1164 * Sync back the VM state and leave the lock before calling any of
1165 * these, you never know what's going to happen here.
1166 */
1167#ifdef VBOX_HIGH_RES_TIMERS_HACK
1168 TMTimerPollVoid(pVM, pVCpu);
1169#endif
1170 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1171 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1172 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK) )
1173 {
1174 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1175 rc = emR3ForcedActions(pVM, pVCpu, rc);
1176 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1177 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1178 if ( rc != VINF_SUCCESS
1179 && rc != VINF_EM_RESCHEDULE_REM)
1180 {
1181 *pfFFDone = true;
1182 break;
1183 }
1184 }
1185
1186 /*
1187 * Have to check if we can get back to fast execution mode every so often.
1188 */
1189 if (!(++cLoops & 7))
1190 {
1191 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu);
1192 if ( enmCheck != EMSTATE_REM
1193 && enmCheck != EMSTATE_IEM_THEN_REM)
1194 {
1195 LogFlow(("emR3RemExecute: emR3Reschedule -> %d -> VINF_EM_RESCHEDULE\n", enmCheck));
1196 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1197 return VINF_EM_RESCHEDULE;
1198 }
1199 Log2(("emR3RemExecute: emR3Reschedule -> %d\n", enmCheck));
1200 }
1201
1202 } /* The Inner Loop, recompiled execution mode version. */
1203
1204 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1205 return rc;
1206}
1207
1208
1209#ifdef DEBUG
1210
1211int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1212{
1213 EMSTATE enmOldState = pVCpu->em.s.enmState;
1214
1215 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1216
1217 Log(("Single step BEGIN:\n"));
1218 for (uint32_t i = 0; i < cIterations; i++)
1219 {
1220 DBGFR3PrgStep(pVCpu);
1221 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1222 emR3RemStep(pVM, pVCpu);
1223 if (emR3Reschedule(pVM, pVCpu) != EMSTATE_REM)
1224 break;
1225 }
1226 Log(("Single step END:\n"));
1227#if defined(VBOX_VMM_TARGET_ARMV8)
1228 AssertReleaseFailed();
1229#else
1230 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1231#endif
1232 pVCpu->em.s.enmState = enmOldState;
1233 return VINF_EM_RESCHEDULE;
1234}
1235
1236#endif /* DEBUG */
1237
1238
1239/**
1240 * Try to execute the problematic code in IEM first, then fall back on REM if there
1241 * is too much of it or if IEM doesn't implement something.
1242 *
1243 * @returns Strict VBox status code from IEMExecLots.
1244 * @param pVM The cross context VM structure.
1245 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1246 * @param pfFFDone Force flags done indicator.
1247 *
1248 * @thread EMT(pVCpu)
1249 */
1250static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1251{
1252#if defined(VBOX_VMM_TARGET_ARMV8)
1253 LogFlow(("emR3ExecuteIemThenRem: %RGv\n", CPUMGetGuestFlatPC(pVCpu)));
1254#else
1255 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1256#endif
1257 *pfFFDone = false;
1258
1259 /*
1260 * Execute in IEM for a while.
1261 */
1262 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1263 {
1264 uint32_t cInstructions;
1265 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 1024 - pVCpu->em.s.cIemThenRemInstructions /*cMaxInstructions*/,
1266 UINT32_MAX/2 /*cPollRate*/, &cInstructions);
1267 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1268 if (rcStrict != VINF_SUCCESS)
1269 {
1270 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1271 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1272 break;
1273
1274 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1275 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1276 return rcStrict;
1277 }
1278
1279 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1280 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1281 {
1282 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1283 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1284 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1285 pVCpu->em.s.enmState = enmNewState;
1286 return VINF_SUCCESS;
1287 }
1288
1289 /*
1290 * Check for pending actions.
1291 */
1292 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1293 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1294 return VINF_SUCCESS;
1295 }
1296
1297 /*
1298 * Switch to REM.
1299 */
1300 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1301 pVCpu->em.s.enmState = EMSTATE_REM;
1302 return VINF_SUCCESS;
1303}
1304
1305
1306/**
1307 * Decides whether to execute RAW, HWACC or REM.
1308 *
1309 * @returns new EM state
1310 * @param pVM The cross context VM structure.
1311 * @param pVCpu The cross context virtual CPU structure.
1312 */
1313EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu)
1314{
1315 /*
1316 * We stay in the wait for SIPI state unless explicitly told otherwise.
1317 */
1318 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1319 return EMSTATE_WAIT_SIPI;
1320
1321 /*
1322 * Execute everything in IEM?
1323 */
1324 if ( pVM->em.s.fIemExecutesAll
1325 || VM_IS_EXEC_ENGINE_IEM(pVM))
1326 return EMSTATE_IEM;
1327
1328#if !defined(VBOX_VMM_TARGET_ARMV8)
1329 if (VM_IS_HM_ENABLED(pVM))
1330 {
1331 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
1332 return EMSTATE_HM;
1333 }
1334 else
1335#endif
1336 if (NEMR3CanExecuteGuest(pVM, pVCpu))
1337 return EMSTATE_NEM;
1338
1339 /*
1340 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1341 * turns off monitoring features essential for raw mode!
1342 */
1343 return EMSTATE_IEM_THEN_REM;
1344}
1345
1346
1347/**
1348 * Executes all high priority post execution force actions.
1349 *
1350 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1351 * fatal error status code.
1352 *
1353 * @param pVM The cross context VM structure.
1354 * @param pVCpu The cross context virtual CPU structure.
1355 * @param rc The current strict VBox status code rc.
1356 */
1357VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1358{
1359 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1360
1361 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1362 PDMCritSectBothFF(pVM, pVCpu);
1363
1364#if !defined(VBOX_VMM_TARGET_ARMV8)
1365 /* Update CR3 (Nested Paging case for HM). */
1366 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1367 {
1368 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1369 int const rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1370 if (RT_FAILURE(rc2))
1371 return rc2;
1372 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1373 }
1374#endif
1375
1376 /* IEM has pending work (typically memory write after INS instruction). */
1377 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1378 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1379
1380 /* IOM has pending work (committing an I/O or MMIO write). */
1381 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1382 {
1383 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1384 if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
1385 { /* half likely, or at least it's a line shorter. */ }
1386 else if (rc == VINF_SUCCESS)
1387 rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
1388 else
1389 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
1390 }
1391
1392 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1393 {
1394 if ( rc > VINF_EM_NO_MEMORY
1395 && rc <= VINF_EM_LAST)
1396 rc = VINF_EM_NO_MEMORY;
1397 }
1398
1399 return rc;
1400}
1401
1402
1403#if !defined(VBOX_VMM_TARGET_ARMV8)
1404/**
1405 * Helper for emR3ForcedActions() for VMX external interrupt VM-exit.
1406 *
1407 * @returns VBox status code.
1408 * @retval VINF_NO_CHANGE if the VMX external interrupt intercept was not active.
1409 * @param pVCpu The cross context virtual CPU structure.
1410 */
1411static int emR3VmxNstGstIntrIntercept(PVMCPU pVCpu)
1412{
1413#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1414 /* Handle the "external interrupt" VM-exit intercept. */
1415 if (CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
1416 {
1417 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
1418 AssertMsg( rcStrict != VINF_VMX_VMEXIT
1419 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1420 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1421 return VBOXSTRICTRC_TODO(rcStrict);
1422 }
1423#else
1424 RT_NOREF(pVCpu);
1425#endif
1426 return VINF_NO_CHANGE;
1427}
1428
1429
1430/**
1431 * Helper for emR3ForcedActions() for SVM interrupt intercept.
1432 *
1433 * @returns VBox status code.
1434 * @retval VINF_NO_CHANGE if the SVM external interrupt intercept was not active.
1435 * @param pVCpu The cross context virtual CPU structure.
1436 */
1437static int emR3SvmNstGstIntrIntercept(PVMCPU pVCpu)
1438{
1439#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1440 /* Handle the physical interrupt intercept (can be masked by the nested hypervisor). */
1441 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
1442 {
1443 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1444 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1445 if (RT_SUCCESS(rcStrict))
1446 {
1447 AssertMsg( rcStrict != VINF_SVM_VMEXIT
1448 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1449 return VBOXSTRICTRC_VAL(rcStrict);
1450 }
1451
1452 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1453 return VINF_EM_TRIPLE_FAULT;
1454 }
1455#else
1456 NOREF(pVCpu);
1457#endif
1458 return VINF_NO_CHANGE;
1459}
1460
1461
1462/**
1463 * Helper for emR3ForcedActions() for SVM virtual interrupt intercept.
1464 *
1465 * @returns VBox status code.
1466 * @retval VINF_NO_CHANGE if the SVM virtual interrupt intercept was not active.
1467 * @param pVCpu The cross context virtual CPU structure.
1468 */
1469static int emR3SvmNstGstVirtIntrIntercept(PVMCPU pVCpu)
1470{
1471#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1472 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
1473 {
1474 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1475 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1476 if (RT_SUCCESS(rcStrict))
1477 {
1478 Assert(rcStrict != VINF_SVM_VMEXIT);
1479 return VBOXSTRICTRC_VAL(rcStrict);
1480 }
1481 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1482 return VINF_EM_TRIPLE_FAULT;
1483 }
1484#else
1485 NOREF(pVCpu);
1486#endif
1487 return VINF_NO_CHANGE;
1488}
1489#endif
1490
1491
1492/**
1493 * Executes all pending forced actions.
1494 *
1495 * Forced actions can cause execution delays and execution
1496 * rescheduling. The first we deal with using action priority, so
1497 * that for instance pending timers aren't scheduled and run until
1498 * right before execution. The rescheduling we deal with using
1499 * return codes. The same goes for VM termination, only in that case
1500 * we exit everything.
1501 *
1502 * @returns VBox status code of equal or greater importance/severity than rc.
1503 * The most important ones are: VINF_EM_RESCHEDULE,
1504 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1505 *
1506 * @param pVM The cross context VM structure.
1507 * @param pVCpu The cross context virtual CPU structure.
1508 * @param rc The current rc.
1509 *
1510 */
1511int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1512{
1513 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1514#ifdef VBOX_STRICT
1515 int rcIrq = VINF_SUCCESS;
1516#endif
1517 int rc2;
1518#define UPDATE_RC() \
1519 do { \
1520 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1521 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1522 break; \
1523 if (!rc || rc2 < rc) \
1524 rc = rc2; \
1525 } while (0)
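/* Illustration (values not taken from this file): the VINF_EM_* status codes are
 * ordered so that a numerically lower value means a more important request, and
 * UPDATE_RC() only lets rc2 replace rc when rc2 is more important. For example:
 *
 *     int rc = VINF_EM_RESCHEDULE;
 *     int rc2 = VINF_EM_SUSPEND;    // lower value, i.e. more important
 *     UPDATE_RC();                  // rc is now VINF_EM_SUSPEND
 *
 * whereas an rc2 of plain VINF_SUCCESS leaves rc untouched. */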
1526 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1527
1528 /*
1529 * Post execution chunk first.
1530 */
1531 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1532 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1533 {
1534 /*
1535 * EMT Rendezvous (must be serviced before termination).
1536 */
1537 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1538 {
1539 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1540 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1541 UPDATE_RC();
1542 /** @todo HACK ALERT! The following test is to make sure EM+TM
1543 * thinks the VM is stopped/reset before the next VM state change
1544 * is made. We need a better solution for this, or at least make it
1545 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1546 * VINF_EM_SUSPEND). */
1547 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1548 {
1549 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1550 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1551 return rc;
1552 }
1553 }
1554
1555 /*
1556 * State change request (cleared by vmR3SetStateLocked).
1557 */
1558 if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
1559 {
1560 VMSTATE enmState = VMR3GetState(pVM);
1561 switch (enmState)
1562 {
1563 case VMSTATE_FATAL_ERROR:
1564 case VMSTATE_FATAL_ERROR_LS:
1565 case VMSTATE_GURU_MEDITATION:
1566 case VMSTATE_GURU_MEDITATION_LS:
1567 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1568 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1569 return VINF_EM_SUSPEND;
1570
1571 case VMSTATE_DESTROYING:
1572 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1573 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1574 return VINF_EM_TERMINATE;
1575
1576 default:
1577 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1578 }
1579 }
1580
1581 /*
1582 * Debugger Facility polling.
1583 */
1584 if ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1585 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1586 {
1587 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1588 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1589 /** @todo why that VINF_EM_DBG_EVENT here? Duplicate info, should be handled
1590 * somewhere before we get here, I would think. */
1591 if (rc == VINF_EM_DBG_EVENT) /* HACK! We should've handled pending debug event. */
1592 rc = rc2;
1593 else
1594 UPDATE_RC();
1595 }
1596
1597 /*
1598 * Postponed reset request.
1599 */
1600 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1601 {
1602 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1603 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1604 UPDATE_RC();
1605 }
1606
1607 /*
1608 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1609 */
1610 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1611 {
1612 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1613 UPDATE_RC();
1614 if (rc == VINF_EM_NO_MEMORY)
1615 return rc;
1616 }
1617
1618 /* check that we got them all */
1619 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1620 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_DBGF);
1621 }
1622
1623 /*
1624 * Normal priority then.
1625 * (Executed in no particular order.)
1626 */
1627 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1628 {
1629 /*
1630 * PDM Queues are pending.
1631 */
1632 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1633 PDMR3QueueFlushAll(pVM);
1634
1635 /*
1636 * PDM DMA transfers are pending.
1637 */
1638 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1639 PDMR3DmaRun(pVM);
1640
1641 /*
1642 * EMT Rendezvous (make sure they are handled before the requests).
1643 */
1644 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1645 {
1646 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1647 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1648 UPDATE_RC();
1649 /** @todo HACK ALERT! The following test is to make sure EM+TM
1650 * thinks the VM is stopped/reset before the next VM state change
1651 * is made. We need a better solution for this, or at least make it
1652 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1653 * VINF_EM_SUSPEND). */
1654 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1655 {
1656 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1657 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1658 return rc;
1659 }
1660 }
1661
1662 /*
1663 * Requests from other threads.
1664 */
1665 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1666 {
1667 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1668 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1669 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1670 {
1671 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1672 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1673 return rc2;
1674 }
1675 UPDATE_RC();
1676 /** @todo HACK ALERT! The following test is to make sure EM+TM
1677 * thinks the VM is stopped/reset before the next VM state change
1678 * is made. We need a better solution for this, or at least make it
1679 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1680 * VINF_EM_SUSPEND). */
1681 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1682 {
1683 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1684 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1685 return rc;
1686 }
1687 }
1688
1689 /* check that we got them all */
1690 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS));
1691 }
1692
1693 /*
1694 * Normal priority then. (per-VCPU)
1695 * (Executed in no particular order.)
1696 */
1697 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1698 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1699 {
1700 /*
1701 * Requests from other threads.
1702 */
1703 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
1704 {
1705 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1706 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1707 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1708 {
1709 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1710 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1711 return rc2;
1712 }
1713 UPDATE_RC();
1714 /** @todo HACK ALERT! The following test is to make sure EM+TM
1715 * thinks the VM is stopped/reset before the next VM state change
1716 * is made. We need a better solution for this, or at least make it
1717 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1718 * VINF_EM_SUSPEND). */
1719 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1720 {
1721 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1722 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1723 return rc;
1724 }
1725 }
1726
1727 /* check that we got them all */
1728 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
1729 }
1730
1731 /*
1732 * High priority pre execution chunk last.
1733 * (Executed in ascending priority order.)
1734 */
1735 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1736 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1737 {
1738 /*
1739 * Timers before interrupts.
1740 */
1741 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)
1742 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1743 TMR3TimerQueuesDo(pVM);
1744
1745#if !defined(VBOX_VMM_TARGET_ARMV8)
1746 /*
1747 * Pick up asynchronously posted interrupts into the APIC.
1748 */
1749 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
1750 APICUpdatePendingInterrupts(pVCpu);
1751
1752 /*
1753 * The instruction following an emulated STI should *always* be executed!
1754 *
1755 * Note! We intentionally don't clear CPUMCTX_INHIBIT_INT here if
1756 * the eip is the same as the inhibited instr address. Before we
1757 * are able to execute this instruction in raw mode (iret to
1758 * guest code) an external interrupt might force a world switch
1759 * again. Possibly allowing a guest interrupt to be dispatched
1760 * in the process. This could break the guest. Sounds very
1761 * unlikely, but such timing-sensitive problems are not as rare as
1762 * you might think.
1763 *
1764 * Note! This used to be a force action flag. Can probably ditch this code.
1765 */
1766 if ( CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
1767 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1768 {
1769 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_INHIBIT_INT);
1770 if (CPUMGetGuestRIP(pVCpu) != pVCpu->cpum.GstCtx.uRipInhibitInt)
1771 {
1772 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
1773 Log(("Clearing CPUMCTX_INHIBIT_INT at %RGv - successor %RGv\n",
1774 (RTGCPTR)CPUMGetGuestRIP(pVCpu), (RTGCPTR)pVCpu->cpum.GstCtx.uRipInhibitInt));
1775 }
1776 else
1777 Log(("Leaving CPUMCTX_INHIBIT_INT set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1778 }
1779
1780 /** @todo SMIs. If we implement SMIs, this is where they will have to be
1781 * delivered. */
1782
1783# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1784 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER))
1785 {
1786 /*
1787 * VMX Nested-guest APIC-write pending (can cause VM-exits).
1788 * Takes priority over even SMI and INIT signals.
1789 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
1790 */
1791 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
1792 {
1793 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu));
1794 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1795 UPDATE_RC();
1796 }
1797
1798 /*
1799 * VMX Nested-guest monitor-trap flag (MTF) VM-exit.
1800 * Takes priority over "Traps on the previous instruction".
1801 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
1802 */
1803 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
1804 {
1805 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */));
1806 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1807 UPDATE_RC();
1808 }
1809
1810 /*
1811 * VMX Nested-guest preemption timer VM-exit.
1812 * Takes priority over NMI-window VM-exits.
1813 */
1814 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
1815 {
1816 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu));
1817 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1818 UPDATE_RC();
1819 }
1820 Assert(!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER));
1821 }
1822# endif
1823
1824 /*
1825 * Guest event injection.
1826 */
1827 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI)));
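        /* fWakeupPending is set once an interrupt or NMI has actually been made
           pending for the guest; the rendezvous and VM-state checks further down
           consult it so the wakeup from EMSTATE_HALTED isn't missed. */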
1828 bool fWakeupPending = false;
1829 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW
1830 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_NESTED_GUEST
1831 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1832 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1833 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
1834 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx) /* Interrupt shadows block both NMIs and interrupts. */
1835 /** @todo r=bird: But interrupt shadows probably do not block vmexits due to host interrupts... */
1836 && !TRPMHasTrap(pVCpu)) /* An event could already be scheduled for dispatching. */
1837 {
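            /* Events are only delivered while the global interrupt flag (GIF) is set;
               a guest using AMD-V can clear it with CLGI, in which case everything
               stays pending until STGI sets it again. */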
1838 if (CPUMGetGuestGif(&pVCpu->cpum.GstCtx))
1839 {
1840 bool fInVmxNonRootMode;
1841 bool fInSvmHwvirtMode;
1842 if (!CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx))
1843 {
1844 fInVmxNonRootMode = false;
1845 fInSvmHwvirtMode = false;
1846 }
1847 else
1848 {
1849 fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
1850 fInSvmHwvirtMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
1851 }
1852
1853 if (0)
1854 { }
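                /* The empty if (0) above merely anchors the 'else if' chain below so the
                   intercept checks read uniformly regardless of which of the
                   VBOX_WITH_NESTED_HWVIRT_* blocks are compiled in. */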
1855# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1856 /*
1857 * VMX NMI-window VM-exit.
1858 * Takes priority over non-maskable interrupts (NMIs).
1859 * Interrupt shadows block NMI-window VM-exits.
1860 * Any event that is already in TRPM (e.g. injected during VM-entry) takes priority.
1861 *
1862 * See Intel spec. 25.2 "Other Causes Of VM Exits".
1863 * See Intel spec. 26.7.6 "NMI-Window Exiting".
1864 */
1865 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
1866 && !CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
1867 {
1868 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
1869 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1870 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* uExitQual */));
1871 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1872 && rc2 != VINF_VMX_VMEXIT
1873 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1874 UPDATE_RC();
1875 }
1876# endif
1877 /*
1878 * NMIs (take priority over external interrupts).
1879 */
1880 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1881 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
1882 {
1883# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1884 if ( fInVmxNonRootMode
1885 && CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_NMI_EXIT))
1886 {
1887 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitXcptNmi(pVCpu));
1888 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1889 UPDATE_RC();
1890 }
1891 else
1892# endif
1893# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1894 if ( fInSvmHwvirtMode
1895 && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_NMI))
1896 {
1897 rc2 = VBOXSTRICTRC_VAL(IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */));
1898 AssertMsg( rc2 != VINF_SVM_VMEXIT
1899 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1900 UPDATE_RC();
1901 }
1902 else
1903# endif
1904 {
1905 rc2 = TRPMAssertTrap(pVCpu, X86_XCPT_NMI, TRPM_TRAP);
1906 if (rc2 == VINF_SUCCESS)
1907 {
1908 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
1909 fWakeupPending = true;
1910 if (pVM->em.s.fIemExecutesAll)
1911 rc2 = VINF_EM_RESCHEDULE;
1912 else
1913 {
1914 rc2 = HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM
1915 : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
1916 : VINF_EM_RESCHEDULE_REM;
1917 }
1918 }
1919 UPDATE_RC();
1920 }
1921 }
1922# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1923 /*
1924 * VMX Interrupt-window VM-exits.
1925 * Takes priority over external interrupts.
1926 */
1927 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
1928 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
1929 {
1930 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
1931 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1932 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* uExitQual */));
1933 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1934 && rc2 != VINF_VMX_VMEXIT
1935 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1936 UPDATE_RC();
1937 }
1938# endif
1939# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1940 /** @todo NSTSVM: Handle this for SVM here too later, rather than only when an
1941 * interrupt is actually pending like we currently do. */
1942# endif
1943 /*
1944 * External interrupts.
1945 */
1946 else
1947 {
1948 /*
1949 * VMX: virtual interrupts take priority over physical interrupts.
1950 * SVM: physical interrupts take priority over virtual interrupts.
1951 */
1952 if ( fInVmxNonRootMode
1953 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1954 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
1955 {
1956 /** @todo NSTVMX: virtual-interrupt delivery. */
1957 rc2 = VINF_SUCCESS;
1958 }
1959 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1960 && CPUMIsGuestPhysIntrEnabled(pVCpu))
1961 {
1962 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1963 if (fInVmxNonRootMode)
1964 rc2 = emR3VmxNstGstIntrIntercept(pVCpu);
1965 else if (fInSvmHwvirtMode)
1966 rc2 = emR3SvmNstGstIntrIntercept(pVCpu);
1967 else
1968 rc2 = VINF_NO_CHANGE;
1969
1970 if (rc2 == VINF_NO_CHANGE)
1971 {
1972 bool fInjected = false;
1973 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1974 /** @todo this really isn't nice, should properly handle this */
1975 /* Note! This can still cause a VM-exit (on Intel). */
1976 LogFlow(("Calling TRPMR3InjectEvent: %04x:%08RX64 efl=%#x\n",
1977 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags));
1978 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT, &fInjected);
1979 fWakeupPending = true;
1980 if ( pVM->em.s.fIemExecutesAll
1981 && ( rc2 == VINF_EM_RESCHEDULE_REM
1982 || rc2 == VINF_EM_RESCHEDULE_HM
1983 || rc2 == VINF_EM_RESCHEDULE_RAW))
1984 {
1985 rc2 = VINF_EM_RESCHEDULE;
1986 }
1987# ifdef VBOX_STRICT
1988 if (fInjected)
1989 rcIrq = rc2;
1990# endif
1991 }
1992 UPDATE_RC();
1993 }
1994 else if ( fInSvmHwvirtMode
1995 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1996 && CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
1997 {
1998 rc2 = emR3SvmNstGstVirtIntrIntercept(pVCpu);
1999 if (rc2 == VINF_NO_CHANGE)
2000 {
2001 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
2002 uint8_t const uNstGstVector = CPUMGetGuestSvmVirtIntrVector(&pVCpu->cpum.GstCtx);
2003 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR %#x\n", uNstGstVector));
2004 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
2005 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
2006 rc2 = VINF_EM_RESCHEDULE;
2007# ifdef VBOX_STRICT
2008 rcIrq = rc2;
2009# endif
2010 }
2011 UPDATE_RC();
2012 }
2013 }
2014 } /* CPUMGetGuestGif */
2015 }
2016#else
2017 bool fWakeupPending = false;
2018 AssertReleaseFailed();
2019 /** @todo */
2020#endif
2021
2022 /*
2023 * Allocate handy pages.
2024 */
2025 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2026 {
2027 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2028 UPDATE_RC();
2029 }
2030
2031 /*
2032 * Debugger Facility request.
2033 */
2034 if ( ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
2035 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
2036 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) )
2037 {
2038 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2039 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2040 UPDATE_RC();
2041 }
2042
2043 /*
2044 * EMT Rendezvous (must be serviced before termination).
2045 */
2046 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2047 && VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
2048 {
2049 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2050 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2051 UPDATE_RC();
2052 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2053 * stopped/reset before the next VM state change is made. We need a better
2054 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2055 * && rc <= VINF_EM_SUSPEND). */
2056 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2057 {
2058 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2059 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2060 return rc;
2061 }
2062 }
2063
2064 /*
2065 * State change request (cleared by vmR3SetStateLocked).
2066 */
2067 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2068 && VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
2069 {
2070 VMSTATE enmState = VMR3GetState(pVM);
2071 switch (enmState)
2072 {
2073 case VMSTATE_FATAL_ERROR:
2074 case VMSTATE_FATAL_ERROR_LS:
2075 case VMSTATE_GURU_MEDITATION:
2076 case VMSTATE_GURU_MEDITATION_LS:
2077 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2078 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2079 return VINF_EM_SUSPEND;
2080
2081 case VMSTATE_DESTROYING:
2082 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2083 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2084 return VINF_EM_TERMINATE;
2085
2086 default:
2087 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2088 }
2089 }
2090
2091 /*
2092 * Out of memory? Since most of our fellow high priority actions may cause us
2093 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2094 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2095 * than us since we can terminate without allocating more memory.
2096 */
2097 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
2098 {
2099 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2100 UPDATE_RC();
2101 if (rc == VINF_EM_NO_MEMORY)
2102 return rc;
2103 }
2104
2105 /*
2106 * If the virtual sync clock is still stopped, make TM restart it.
2107 */
2108 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
2109 TMR3VirtualSyncFF(pVM, pVCpu);
2110
2111#ifdef DEBUG
2112 /*
2113 * Debug, pause the VM.
2114 */
2115 if (VM_FF_IS_SET(pVM, VM_FF_DEBUG_SUSPEND))
2116 {
2117 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2118 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2119 return VINF_EM_SUSPEND;
2120 }
2121#endif
2122
2123 /* check that we got them all */
2124 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2125 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
2126 }
2127
2128#undef UPDATE_RC
2129 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2130 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2131 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2132 return rc;
2133}
2134
2135
2136/**
2137 * Check if the preset execution time cap restricts guest execution scheduling.
2138 *
2139 * @returns true if allowed, false otherwise
2140 * @param pVM The cross context VM structure.
2141 * @param pVCpu The cross context virtual CPU structure.
2142 */
2143bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2144{
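    /* Wall-clock time is divided into EM_TIME_SLICE millisecond slices; within a
       slice this EMT may consume at most (EM_TIME_SLICE * uCpuExecutionCap) / 100
       milliseconds of combined kernel+user CPU time.  For example, with a 100 ms
       slice and a 50% cap, execution stops being allowed after 50 ms of CPU time
       and becomes allowed again when the next slice starts. */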
2145 uint64_t u64UserTime, u64KernelTime;
2146
2147 if ( pVM->uCpuExecutionCap != 100
2148 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2149 {
2150 uint64_t u64TimeNow = RTTimeMilliTS();
2151 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2152 {
2153 /* New time slice. */
2154 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2155 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2156 pVCpu->em.s.u64TimeSliceExec = 0;
2157 }
2158 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2159
2160 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2161 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2162 return false;
2163 }
2164 return true;
2165}
2166
2167
2168/**
2169 * Execute VM.
2170 *
2171 * This function is the main loop of the VM. The emulation thread
2172 * calls this function when the VM has been successfully constructed
2173 * and we're ready to execute the VM.
2174 *
2175 * Returning from this function means that the VM is turned off or
2176 * suspended (state already saved) and deconstruction is next in line.
2177 *
2178 * All interaction from other threads is done using forced actions
2179 * and signalling of the wait object.
2180 *
2181 * @returns VBox status code; informational status codes may indicate failure.
2182 * @param pVM The cross context VM structure.
2183 * @param pVCpu The cross context virtual CPU structure.
2184 */
2185VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2186{
2187 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s)\n",
2188 pVM,
2189 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2190 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2191 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState) ));
2192 VM_ASSERT_EMT(pVM);
2193 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2194 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2195 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2196 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2197
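    /* Fatal errors deeper down the call stack longjmp back to this point; in that
       case rc is non-zero and we take the fatal error path in the else branch below. */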
2198 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2199 if (rc == 0)
2200 {
2201 /*
2202 * Start the virtual time.
2203 */
2204 TMR3NotifyResume(pVM, pVCpu);
2205
2206 /*
2207 * The Outer Main Loop.
2208 */
2209 bool fFFDone = false;
2210
2211 /* Reschedule right away to start in the right state. */
2212 rc = VINF_SUCCESS;
2213
2214 /* If resuming after a pause or a state load, restore the previous
2215 state or else we'll start executing code. Else, just reschedule. */
2216 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2217 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2218 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2219 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2220 else
2221 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
2222 pVCpu->em.s.cIemThenRemInstructions = 0;
2223 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2224
2225 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2226 for (;;)
2227 {
2228 /*
2229 * Before we can schedule anything (we're here because
2230 * scheduling is required) we must service any pending
2231 * forced actions to avoid any pending action causing
2232 * immediate rescheduling upon entering an inner loop
2233 *
2234 * Do forced actions.
2235 */
2236 if ( !fFFDone
2237 && RT_SUCCESS(rc)
2238 && rc != VINF_EM_TERMINATE
2239 && rc != VINF_EM_OFF
2240 && ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
2241 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2242 {
2243 rc = emR3ForcedActions(pVM, pVCpu, rc);
2244 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2245 }
2246 else if (fFFDone)
2247 fFFDone = false;
2248
2249#if defined(VBOX_STRICT) && !defined(VBOX_VMM_TARGET_ARMV8)
2250 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
2251#endif
2252
2253 /*
2254 * Now what to do?
2255 */
2256 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2257 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2258 switch (rc)
2259 {
2260 /*
2261 * Keep doing what we're currently doing.
2262 */
2263 case VINF_SUCCESS:
2264 break;
2265
2266 /*
2267 * Reschedule - to raw-mode execution.
2268 */
2269/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2270 case VINF_EM_RESCHEDULE_RAW:
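                    /* Raw-mode execution has been removed, so nothing should request it
                       anymore; the assertion below documents that landing here is a bug. */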
2271 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2272 AssertLogRelFailed();
2273 pVCpu->em.s.enmState = EMSTATE_NONE;
2274 break;
2275
2276 /*
2277 * Reschedule - to HM or NEM.
2278 */
2279 case VINF_EM_RESCHEDULE_HM:
2280 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2281#if !defined(VBOX_VMM_TARGET_ARMV8)
2282 if (VM_IS_HM_ENABLED(pVM))
2283 {
2284 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
2285 {
2286 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2287 pVCpu->em.s.enmState = EMSTATE_HM;
2288 }
2289 else
2290 {
2291 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_IEM_THEN_REM)\n", enmOldState, EMSTATE_IEM_THEN_REM));
2292 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2293 }
2294 }
2295 else
2296#endif
2297 if (VM_IS_NEM_ENABLED(pVM))
2298 {
2299 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2300 pVCpu->em.s.enmState = EMSTATE_NEM;
2301 }
2302 else
2303 {
2304 AssertLogRelFailed();
2305 pVCpu->em.s.enmState = EMSTATE_NONE;
2306 }
2307 break;
2308
2309 /*
2310 * Reschedule - to recompiled execution.
2311 */
2312 case VINF_EM_RESCHEDULE_REM:
2313 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2314 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2315 enmOldState, EMSTATE_IEM_THEN_REM));
2316 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2317 {
2318 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2319 pVCpu->em.s.cIemThenRemInstructions = 0;
2320 }
2321 break;
2322
2323 /*
2324 * Resume.
2325 */
2326 case VINF_EM_RESUME:
2327 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2328 /* Don't reschedule in the halted or wait for SIPI case. */
2329 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2330 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2331 {
2332 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2333 break;
2334 }
2335 /* fall through and get scheduled. */
2336 RT_FALL_THRU();
2337
2338 /*
2339 * Reschedule.
2340 */
2341 case VINF_EM_RESCHEDULE:
2342 {
2343 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2344 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2345 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2346 pVCpu->em.s.cIemThenRemInstructions = 0;
2347 pVCpu->em.s.enmState = enmState;
2348 break;
2349 }
2350
2351 /*
2352 * Halted.
2353 */
2354 case VINF_EM_HALT:
2355 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2356 pVCpu->em.s.enmState = EMSTATE_HALTED;
2357 break;
2358
2359 /*
2360 * Switch to the wait for SIPI state (application processor only)
2361 */
2362 case VINF_EM_WAIT_SIPI:
2363 Assert(pVCpu->idCpu != 0);
2364 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2365 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2366 break;
2367
2368
2369 /*
2370 * Suspend.
2371 */
2372 case VINF_EM_SUSPEND:
2373 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2374 Assert(enmOldState != EMSTATE_SUSPENDED);
2375 pVCpu->em.s.enmPrevState = enmOldState;
2376 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2377 break;
2378
2379 /*
2380 * Reset.
2381 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2382 */
2383 case VINF_EM_RESET:
2384 {
2385 if (pVCpu->idCpu == 0)
2386 {
2387 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2388 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2389 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2390 pVCpu->em.s.cIemThenRemInstructions = 0;
2391 pVCpu->em.s.enmState = enmState;
2392 }
2393 else
2394 {
2395 /* All other VCPUs go into the wait for SIPI state. */
2396 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2397 }
2398 break;
2399 }
2400
2401 /*
2402 * Power Off.
2403 */
2404 case VINF_EM_OFF:
2405 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2406 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2407 TMR3NotifySuspend(pVM, pVCpu);
2408 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2409 return rc;
2410
2411 /*
2412 * Terminate the VM.
2413 */
2414 case VINF_EM_TERMINATE:
2415 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2416 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2417 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2418 TMR3NotifySuspend(pVM, pVCpu);
2419 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2420 return rc;
2421
2422
2423 /*
2424 * Out of memory, suspend the VM and stuff.
2425 */
2426 case VINF_EM_NO_MEMORY:
2427 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2428 Assert(enmOldState != EMSTATE_SUSPENDED);
2429 pVCpu->em.s.enmPrevState = enmOldState;
2430 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2431 TMR3NotifySuspend(pVM, pVCpu);
2432 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2433
2434 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2435 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2436 if (rc != VINF_EM_SUSPEND)
2437 {
2438 if (RT_SUCCESS_NP(rc))
2439 {
2440 AssertLogRelMsgFailed(("%Rrc\n", rc));
2441 rc = VERR_EM_INTERNAL_ERROR;
2442 }
2443 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2444 }
2445 return rc;
2446
2447 /*
2448 * Guest debug events.
2449 */
2450 case VINF_EM_DBG_STEPPED:
2451 case VINF_EM_DBG_STOP:
2452 case VINF_EM_DBG_EVENT:
2453 case VINF_EM_DBG_BREAKPOINT:
2454 case VINF_EM_DBG_STEP:
2455 if (enmOldState == EMSTATE_RAW)
2456 {
2457 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2458 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2459 }
2460 else if (enmOldState == EMSTATE_HM)
2461 {
2462 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2463 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2464 }
2465 else if (enmOldState == EMSTATE_NEM)
2466 {
2467 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2468 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2469 }
2470 else if (enmOldState == EMSTATE_REM)
2471 {
2472 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2473 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2474 }
2475 else
2476 {
2477 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2478 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2479 }
2480 break;
2481
2482 /*
2483 * Hypervisor debug events.
2484 */
2485 case VINF_EM_DBG_HYPER_STEPPED:
2486 case VINF_EM_DBG_HYPER_BREAKPOINT:
2487 case VINF_EM_DBG_HYPER_ASSERTION:
2488 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2489 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2490 break;
2491
2492 /*
2493 * Triple fault.
2494 */
2495 case VINF_EM_TRIPLE_FAULT:
2496 if (!pVM->em.s.fGuruOnTripleFault)
2497 {
2498 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2499 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2500 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2501 continue;
2502 }
2503 /* Else fall through and trigger a guru. */
2504 RT_FALL_THRU();
2505
2506 case VERR_VMM_RING0_ASSERTION:
2507 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2508 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2509 break;
2510
2511 /*
2512 * Any error code showing up here other than the ones we
2513 * know and process above are considered to be FATAL.
2514 *
2515 * Unknown warnings and informational status codes are also
2516 * included in this.
2517 */
2518 default:
2519 if (RT_SUCCESS_NP(rc))
2520 {
2521 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2522 rc = VERR_EM_INTERNAL_ERROR;
2523 }
2524 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2525 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2526 break;
2527 }
2528
2529 /*
2530 * Act on state transition.
2531 */
2532 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2533 if (enmOldState != enmNewState)
2534 {
2535 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2536
2537 /* Clear MWait flags and the unhalt FF. */
2538 if ( enmOldState == EMSTATE_HALTED
2539 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2540 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2541 && ( enmNewState == EMSTATE_RAW
2542 || enmNewState == EMSTATE_HM
2543 || enmNewState == EMSTATE_NEM
2544 || enmNewState == EMSTATE_REM
2545 || enmNewState == EMSTATE_IEM_THEN_REM
2546 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2547 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2548 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2549 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2550 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2551 {
2552 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2553 {
2554 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2555 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2556 }
2557 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2558 {
2559 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2560 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2561 }
2562 }
2563 }
2564 else
2565 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2566
2567 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2568 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2569
2570 /*
2571 * Act on the new state.
2572 */
2573 switch (enmNewState)
2574 {
2575 /*
2576 * Execute raw.
2577 */
2578 case EMSTATE_RAW:
2579 AssertLogRelMsgFailed(("%Rrc\n", rc));
2580 rc = VERR_EM_INTERNAL_ERROR;
2581 break;
2582
2583 /*
2584 * Execute hardware accelerated raw.
2585 */
2586 case EMSTATE_HM:
2587#if defined(VBOX_VMM_TARGET_ARMV8)
2588 AssertReleaseFailed(); /* Should never get here. */
2589#else
2590 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2591#endif
2592 break;
2593
2594 /*
2595 * Execute using the native execution manager (NEM).
2596 */
2597 case EMSTATE_NEM:
2598 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2599 break;
2600
2601 /*
2602 * Execute recompiled.
2603 */
2604 case EMSTATE_REM:
2605 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2606 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2607 break;
2608
2609 /*
2610 * Execute in the interpreter.
2611 */
2612 case EMSTATE_IEM:
2613 {
2614 uint32_t cInstructions = 0;
2615#if 0 /* For testing purposes. */
2616 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2617 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2618 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2619 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2620 rc = VINF_SUCCESS;
2621 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2622#endif
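                    /* Let IEM execute a burst of guest instructions; cMaxInstructions caps
                       the burst, cPollRate governs how often IEM checks for pending work
                       while doing so, and the count actually executed is returned in
                       cInstructions. */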
2623 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 2047 /*cPollRate*/, &cInstructions));
2624 if (pVM->em.s.fIemExecutesAll)
2625 {
2626 Assert(rc != VINF_EM_RESCHEDULE_REM);
2627 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2628 Assert(rc != VINF_EM_RESCHEDULE_HM);
2629#ifdef VBOX_HIGH_RES_TIMERS_HACK
2630 if (cInstructions < 2048)
2631 TMTimerPollVoid(pVM, pVCpu);
2632#endif
2633 }
2634 fFFDone = false;
2635 break;
2636 }
2637
2638 /*
2639 * Execute in IEM, hoping we can quickly switch back to HM
2640 * or RAW execution. If our hopes fail, we go to REM.
2641 */
2642 case EMSTATE_IEM_THEN_REM:
2643 {
2644 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2645 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2646 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2647 break;
2648 }
2649
2650 /*
2651 * Application processor execution halted until SIPI.
2652 */
2653 case EMSTATE_WAIT_SIPI:
2654 /* no break */
2655 /*
2656 * hlt - execution halted until interrupt.
2657 */
2658 case EMSTATE_HALTED:
2659 {
2660 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2661                 /* If HM (or someone else) stores a pending interrupt in
2662 TRPM, it must be dispatched ASAP without any halting.
2663 Anything pending in TRPM has been accepted and the CPU
2664                    should already be in the right state to receive it. */
2665 if (TRPMHasTrap(pVCpu))
2666 rc = VINF_EM_RESCHEDULE;
2667 /* MWAIT has a special extension where it's woken up when
2668 an interrupt is pending even when IF=0. */
2669 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2670 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2671 {
2672 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2673 if (rc == VINF_SUCCESS)
2674 {
2675#if defined(VBOX_VMM_TARGET_ARMV8)
2676 AssertReleaseFailed();
2677#else
2678 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2679 APICUpdatePendingInterrupts(pVCpu);
2680#endif
2681
2682 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2683 | VMCPU_FF_INTERRUPT_NESTED_GUEST
2684 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2685 {
2686 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2687 rc = VINF_EM_RESCHEDULE;
2688 }
2689 }
2690 }
2691 else
2692 {
2693#if defined(VBOX_VMM_TARGET_ARMV8)
2694 bool fIgnoreInterrupts = false;
2695 AssertReleaseFailed();
2696#else
2697 bool fIgnoreInterrupts = !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF);
2698#endif
2699 rc = VMR3WaitHalted(pVM, pVCpu, fIgnoreInterrupts);
2700 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
2701 check VMCPU_FF_UPDATE_APIC here. */
2702 if ( rc == VINF_SUCCESS
2703 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2704 {
2705 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2706 rc = VINF_EM_RESCHEDULE;
2707 }
2708 }
2709
2710 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2711 break;
2712 }
2713
2714 /*
2715 * Suspended - return to VM.cpp.
2716 */
2717 case EMSTATE_SUSPENDED:
2718 TMR3NotifySuspend(pVM, pVCpu);
2719 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2720 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2721 return VINF_EM_SUSPEND;
2722
2723 /*
2724 * Debugging in the guest.
2725 */
2726 case EMSTATE_DEBUG_GUEST_RAW:
2727 case EMSTATE_DEBUG_GUEST_HM:
2728 case EMSTATE_DEBUG_GUEST_NEM:
2729 case EMSTATE_DEBUG_GUEST_IEM:
2730 case EMSTATE_DEBUG_GUEST_REM:
2731 TMR3NotifySuspend(pVM, pVCpu);
2732 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2733 TMR3NotifyResume(pVM, pVCpu);
2734 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2735 break;
2736
2737 /*
2738 * Debugging in the hypervisor.
2739 */
2740 case EMSTATE_DEBUG_HYPER:
2741 {
2742 TMR3NotifySuspend(pVM, pVCpu);
2743 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2744
2745 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2746 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2747 if (rc != VINF_SUCCESS)
2748 {
2749 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2750 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2751 else
2752 {
2753 /* switch to guru meditation mode */
2754 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2755 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2756 VMMR3FatalDump(pVM, pVCpu, rc);
2757 }
2758 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2759 return rc;
2760 }
2761
2762 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2763 TMR3NotifyResume(pVM, pVCpu);
2764 break;
2765 }
2766
2767 /*
2768 * Guru meditation takes place in the debugger.
2769 */
2770 case EMSTATE_GURU_MEDITATION:
2771 {
2772 TMR3NotifySuspend(pVM, pVCpu);
2773 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2774 VMMR3FatalDump(pVM, pVCpu, rc);
2775 emR3Debug(pVM, pVCpu, rc);
2776 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2777 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2778 return rc;
2779 }
2780
2781 /*
2782 * The states we don't expect here.
2783 */
2784 case EMSTATE_NONE:
2785 case EMSTATE_TERMINATING:
2786 default:
2787 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2788 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2789 TMR3NotifySuspend(pVM, pVCpu);
2790 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2791 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2792 return VERR_EM_INTERNAL_ERROR;
2793 }
2794 } /* The Outer Main Loop */
2795 }
2796 else
2797 {
2798 /*
2799 * Fatal error.
2800 */
2801 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2802 TMR3NotifySuspend(pVM, pVCpu);
2803 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2804 VMMR3FatalDump(pVM, pVCpu, rc);
2805 emR3Debug(pVM, pVCpu, rc);
2806 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2807 /** @todo change the VM state! */
2808 return rc;
2809 }
2810
2811 /* not reached */
2812}
2813