VirtualBox

source: vbox/trunk/src/VBox/VMM/EM.cpp@21191

Last change on this file since 21191 was 21191, checked in by vboxsync, 16 years ago

Split up RC handling for raw and hwacc modes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 155.8 KB
/* $Id: EM.cpp 21191 2009-07-03 11:39:50Z vboxsync $ */
/** @file
 * EM - Execution Monitor / Manager.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/** @page pg_em EM - The Execution Monitor / Manager
 *
 * The Execution Monitor/Manager is responsible for running the VM, scheduling
 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
 * Interpreted), and keeping the CPU states in sync. The function
 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
 * modes has different inner loops (emR3RawExecute, emR3HwAccExecute, and
 * emR3RemExecute).
 *
 * The interpreted execution is only used to avoid switching between
 * raw-mode/hwaccm and the recompiler when fielding virtualization traps/faults.
 * The interpretation is thus implemented as part of EM.
 *
 * @see grp_em
 */
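
/*
 * A minimal, illustrative sketch of how the EMR3ExecuteVM() outer loop
 * described above might dispatch to the per-mode inner loops. This is not
 * part of EM.cpp: the state and function names are the real ones, but the
 * dispatch below is simplified and omits forced actions, the debug states
 * and error handling.
 */
#if 0
    for (;;)
    {
        switch (pVCpu->em.s.enmState)
        {
            case EMSTATE_RAW:   rc = emR3RawExecute(pVM, pVCpu, &fFFDone);   break;
            case EMSTATE_HWACC: rc = emR3HwAccExecute(pVM, pVCpu, &fFFDone); break;
            case EMSTATE_REM:   rc = emR3RemExecute(pVM, pVCpu, &fFFDone);   break;
            default: /* halted, suspended, debugging, guru... */             break;
        }
        /* ...process rc, run forced actions, then pick the next mode via emR3Reschedule()... */
    }
#endif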

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_EM
#include <VBox/em.h>
#include <VBox/vmm.h>
#ifdef VBOX_WITH_VMI
# include <VBox/parav.h>
#endif
#include <VBox/patm.h>
#include <VBox/csam.h>
#include <VBox/selm.h>
#include <VBox/trpm.h>
#include <VBox/iom.h>
#include <VBox/dbgf.h>
#include <VBox/pgm.h>
#include <VBox/rem.h>
#include <VBox/tm.h>
#include <VBox/mm.h>
#include <VBox/ssm.h>
#include <VBox/pdmapi.h>
#include <VBox/pdmcritsect.h>
#include <VBox/pdmqueue.h>
#include <VBox/hwaccm.h>
#include <VBox/patm.h>
#include "EMInternal.h"
#include <VBox/vm.h>
#include <VBox/cpumdis.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <VBox/dbgf.h>

#include <VBox/log.h>
#include <iprt/thread.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/semaphore.h>
#include <iprt/string.h>
#include <iprt/avl.h>
#include <iprt/stream.h>
#include <VBox/param.h>
#include <VBox/err.h>


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
#define EM_NOTIFY_HWACCM
#endif


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc);
static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
static int emR3RawResumeHyper(PVM pVM, PVMCPU pVCpu);
static int emR3RawStep(PVM pVM, PVMCPU pVCpu);
DECLINLINE(int) emR3RawUpdateForceFlag(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc);
static int emR3RawForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
static int emR3RawExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
DECLINLINE(int) emR3RawExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC = VINF_SUCCESS);
static int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
static int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
static int emR3RawGuestTrap(PVM pVM, PVMCPU pVCpu);
static int emR3PatchTrap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int gcret);
static int emR3SingleStepExecRem(PVM pVM, uint32_t cIterations);
static int emR3RawPrivileged(PVM pVM, PVMCPU pVCpu);
static int emR3RawExecuteIOInstruction(PVM pVM, PVMCPU pVCpu);
static int emR3RawRingSwitch(PVM pVM, PVMCPU pVCpu);
static EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);

#define EMHANDLERC_WITH_PATM
#define EMHANDLERC_NAME emR3RawHandleRC
#include "EMHandleRCTmpl.h"

#define EMHANDLERC_NAME emR3HwaccmHandleRC
#include "EMHandleRCTmpl.h"

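
/*
 * The two includes above instantiate EMHandleRCTmpl.h twice: once with PATM
 * support for raw mode (emR3RawHandleRC) and once without it for hardware
 * accelerated mode (emR3HwaccmHandleRC). A hedged sketch of that template
 * pattern, assuming the template undefines its parameters after use (the
 * real contents live in EMHandleRCTmpl.h):
 */
#if 0
/* EMHandleRCTmpl.h (sketch only) */
static int EMHANDLERC_NAME(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
{
    switch (rc)
    {
# ifdef EMHANDLERC_WITH_PATM
        /* PATM-specific status codes are compiled into the raw-mode instance only. */
# endif
        /* ...status codes common to both instances... */
    }
    return rc;
}
# undef EMHANDLERC_NAME
# undef EMHANDLERC_WITH_PATM
#endif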
/**
 * Initializes the EM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
VMMR3DECL(int) EMR3Init(PVM pVM)
{
    LogFlow(("EMR3Init\n"));
    /*
     * Assert alignment and sizes.
     */
    AssertCompileMemberAlignment(VM, em.s, 32);
    AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
    AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));
    AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));

    /*
     * Init the structure.
     */
    pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
    int rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR3Enabled", &pVM->fRawR3Enabled);
    if (RT_FAILURE(rc))
        pVM->fRawR3Enabled = true;
    rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR0Enabled", &pVM->fRawR0Enabled);
    if (RT_FAILURE(rc))
        pVM->fRawR0Enabled = true;
    Log(("EMR3Init: fRawR3Enabled=%d fRawR0Enabled=%d\n", pVM->fRawR3Enabled, pVM->fRawR0Enabled));

    /*
     * Initialize the REM critical section.
     */
    rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, "EM-REM");
    AssertRCReturn(rc, rc);

    /*
     * Saved state.
     */
    rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
                               NULL, emR3Save, NULL,
                               NULL, emR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

    for (unsigned i=0;i<pVM->cCPUs;i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        pVCpu->em.s.offVMCPU = RT_OFFSETOF(VMCPU, em.s);

        pVCpu->em.s.enmState     = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
        pVCpu->em.s.enmPrevState = EMSTATE_NONE;
        pVCpu->em.s.fForceRAW    = false;

        pVCpu->em.s.pCtx         = CPUMQueryGuestCtxPtr(pVCpu);
        pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
        AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));

# define EM_REG_COUNTER(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_COUNTER_USED(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE_ADV(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

        /*
         * Statistics.
         */
#ifdef VBOX_WITH_STATISTICS
        PEMSTATS pStats;
        rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
        if (RT_FAILURE(rc))
            return rc;

        pVCpu->em.s.pStatsR3 = pStats;
        pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
        pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);

        EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
        EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");

        EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
        EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");

        EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
        EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of instruction prefixes.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of instruction prefixes.");

        EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
        EM_REG_COUNTER_USED(&pStats->StatIn, "/EM/CPU%d/R3/PrivInst/In", "Number of in instructions.");
        EM_REG_COUNTER_USED(&pStats->StatOut, "/EM/CPU%d/R3/PrivInst/Out", "Number of out instructions.");
        EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
        EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
        EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");

        EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
        pVCpu->em.s.pCliStatTree = 0;

        /* these should be considered for release statistics. */
        EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
        EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
        EM_REG_COUNTER(&pVCpu->em.s.StatMiscEmu, "/PROF/CPU%d/EM/Emulation/Misc", "Profiling of emR3RawExecuteInstruction.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHwAccEntry, "/PROF/CPU%d/EM/HwAccEnter", "Profiling Hardware Accelerated Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHwAccExec, "/PROF/CPU%d/EM/HwAccExec", "Profiling Hardware Accelerated Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");

#endif /* VBOX_WITH_STATISTICS */

        EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
        EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
        EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");

        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
    }

    return VINF_SUCCESS;
}
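
/*
 * A hedged sketch (not part of EM.cpp) of how the "RawR3Enabled" and
 * "RawR0Enabled" CFGM keys queried in EMR3Init() could be set from VM
 * construction code before EM is initialized; both default to true when the
 * query above fails.
 */
#if 0
    int rc2 = CFGMR3InsertInteger(CFGMR3GetRoot(pVM), "RawR3Enabled", 0); /* don't raw-mode execute ring-3 guest code */
    AssertRC(rc2);
    rc2 = CFGMR3InsertInteger(CFGMR3GetRoot(pVM), "RawR0Enabled", 0);     /* don't raw-mode execute ring-0 guest code */
    AssertRC(rc2);
#endif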


/**
 * Initializes the per-VCPU EM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
VMMR3DECL(int) EMR3InitCPU(PVM pVM)
{
    LogFlow(("EMR3InitCPU\n"));
    return VINF_SUCCESS;
}


/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * @param   pVM     The VM.
 */
VMMR3DECL(void) EMR3Relocate(PVM pVM)
{
    LogFlow(("EMR3Relocate\n"));
    for (unsigned i=0;i<pVM->cCPUs;i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        if (pVCpu->em.s.pStatsR3)
            pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
    }
}


/**
 * Reset notification.
 *
 * @param   pVM     The VM handle.
 */
VMMR3DECL(void) EMR3Reset(PVM pVM)
{
    LogFlow(("EMR3Reset: \n"));
    for (unsigned i=0;i<pVM->cCPUs;i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        pVCpu->em.s.fForceRAW = false;
    }
}


/**
 * Terminates the EM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM itself is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
VMMR3DECL(int) EMR3Term(PVM pVM)
{
    AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));

    PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
    return VINF_SUCCESS;
}

/**
 * Terminates the per-VCPU EM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM itself is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
VMMR3DECL(int) EMR3TermCPU(PVM pVM)
{
    return 0;
}

/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    for (VMCPUID i = 0; i < pVM->cCPUs; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
        AssertRCReturn(rc, rc);

        Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
        Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
        rc = SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
        AssertRCReturn(rc, rc);
    }
    return VINF_SUCCESS;
}


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   u32Version  Data layout version.
 */
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    int rc = VINF_SUCCESS;

    /*
     * Validate version.
     */
    if (    u32Version != EM_SAVED_STATE_VERSION
        &&  u32Version != EM_SAVED_STATE_VERSION_PRE_SMP)
    {
        AssertMsgFailed(("emR3Load: Invalid version u32Version=%d (current %d)!\n", u32Version, EM_SAVED_STATE_VERSION));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Load the saved state.
     */
    for (VMCPUID i = 0; i < pVM->cCPUs; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
        if (RT_FAILURE(rc))
            pVCpu->em.s.fForceRAW = false;

        if (u32Version > EM_SAVED_STATE_VERSION_PRE_SMP)
        {
            AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
            rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
            AssertRCReturn(rc, rc);
            Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);

            pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
        }
        Assert(!pVCpu->em.s.pCliStatTree);
    }
    return rc;
}
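
/*
 * The EM saved-state unit written by emR3Save() above is, per VCPU, a bool
 * (fForceRAW) followed, since the SMP-aware version, by a uint32_t holding
 * enmPrevState. A hedged sketch of a matching reader for the current
 * version (variable names here are illustrative):
 */
#if 0
    bool     fForceRAW;
    uint32_t uPrevState;
    SSMR3GetBool(pSSM, &fForceRAW);
    SSMR3GetU32(pSSM, &uPrevState);  /* only present when u32Version > EM_SAVED_STATE_VERSION_PRE_SMP */
#endif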


/**
 * Enables or disables a set of raw-mode execution modes.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
 * @returns VERR_INVALID_PARAMETER on an invalid enmMode value.
 *
 * @param   pVM         The VM to operate on.
 * @param   enmMode     The execution mode change.
 * @thread  The emulation thread.
 */
VMMR3DECL(int) EMR3RawSetMode(PVM pVM, EMRAWMODE enmMode)
{
    switch (enmMode)
    {
        case EMRAW_NONE:
            pVM->fRawR3Enabled = false;
            pVM->fRawR0Enabled = false;
            break;
        case EMRAW_RING3_ENABLE:
            pVM->fRawR3Enabled = true;
            break;
        case EMRAW_RING3_DISABLE:
            pVM->fRawR3Enabled = false;
            break;
        case EMRAW_RING0_ENABLE:
            pVM->fRawR0Enabled = true;
            break;
        case EMRAW_RING0_DISABLE:
            pVM->fRawR0Enabled = false;
            break;
        default:
            AssertMsgFailed(("Invalid enmMode=%d\n", enmMode));
            return VERR_INVALID_PARAMETER;
    }
    Log(("EMR3SetRawMode: fRawR3Enabled=%RTbool fRawR0Enabled=%RTbool\n",
          pVM->fRawR3Enabled, pVM->fRawR0Enabled));
    return pVM->aCpus[0].em.s.enmState == EMSTATE_RAW ? VINF_EM_RESCHEDULE : VINF_SUCCESS;
}
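
/*
 * A hedged usage sketch (not part of EM.cpp): disabling ring-3 raw-mode at
 * runtime and honouring the reschedule hint returned above.
 */
#if 0
    int rc = EMR3RawSetMode(pVM, EMRAW_RING3_DISABLE);
    if (rc == VINF_EM_RESCHEDULE)
    {
        /* The EM loop will pick a new execution mode on its next iteration. */
    }
#endif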


/**
 * Raise a fatal error.
 *
 * Safely terminate the VM with full state report and stuff. This function
 * will naturally never return.
 *
 * @param   pVCpu       VMCPU handle.
 * @param   rc          VBox status code.
 */
VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
{
    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
    longjmp(pVCpu->em.s.u.FatalLongJump, rc);
    AssertReleaseMsgFailed(("longjmp returned!\n"));
}


/**
 * Gets the EM state name.
 *
 * @returns Pointer to a read-only state name.
 * @param   enmState    The state.
 */
VMMR3DECL(const char *) EMR3GetStateName(EMSTATE enmState)
{
    switch (enmState)
    {
        case EMSTATE_NONE:              return "EMSTATE_NONE";
        case EMSTATE_RAW:               return "EMSTATE_RAW";
        case EMSTATE_HWACC:             return "EMSTATE_HWACC";
        case EMSTATE_REM:               return "EMSTATE_REM";
        case EMSTATE_PARAV:             return "EMSTATE_PARAV";
        case EMSTATE_HALTED:            return "EMSTATE_HALTED";
        case EMSTATE_WAIT_SIPI:         return "EMSTATE_WAIT_SIPI";
        case EMSTATE_SUSPENDED:         return "EMSTATE_SUSPENDED";
        case EMSTATE_TERMINATING:       return "EMSTATE_TERMINATING";
        case EMSTATE_DEBUG_GUEST_RAW:   return "EMSTATE_DEBUG_GUEST_RAW";
        case EMSTATE_DEBUG_GUEST_REM:   return "EMSTATE_DEBUG_GUEST_REM";
        case EMSTATE_DEBUG_HYPER:       return "EMSTATE_DEBUG_HYPER";
        case EMSTATE_GURU_MEDITATION:   return "EMSTATE_GURU_MEDITATION";
        default:                        return "Unknown!";
    }
}
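
/*
 * Hedged usage sketch: logging the current EM state by name.
 */
#if 0
    Log(("EM state: %s\n", EMR3GetStateName(pVCpu->em.s.enmState)));
#endif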


#ifdef VBOX_WITH_STATISTICS
/**
 * Just a braindead function to keep track of cli addresses.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   GCPtrInstr  The EIP of the cli instruction.
 */
static void emR3RecordCli(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrInstr)
{
    PCLISTAT pRec;

    pRec = (PCLISTAT)RTAvlPVGet(&pVCpu->em.s.pCliStatTree, (AVLPVKEY)GCPtrInstr);
    if (!pRec)
    {
        /* New cli instruction; insert into the tree. */
        pRec = (PCLISTAT)MMR3HeapAllocZ(pVM, MM_TAG_EM, sizeof(*pRec));
        Assert(pRec);
        if (!pRec)
            return;
        pRec->Core.Key = (AVLPVKEY)GCPtrInstr;

        char szCliStatName[32];
        RTStrPrintf(szCliStatName, sizeof(szCliStatName), "/EM/Cli/0x%RGv", GCPtrInstr);
        STAM_REG(pVM, &pRec->Counter, STAMTYPE_COUNTER, szCliStatName, STAMUNIT_OCCURENCES, "Number of times cli was executed.");

        bool fRc = RTAvlPVInsert(&pVCpu->em.s.pCliStatTree, &pRec->Core);
        Assert(fRc); NOREF(fRc);
    }
    STAM_COUNTER_INC(&pRec->Counter);
    STAM_COUNTER_INC(&pVCpu->em.s.StatTotalClis);
}
#endif /* VBOX_WITH_STATISTICS */


/**
 * Debug loop.
 *
 * @returns VBox status code for EM.
 * @param   pVM     VM handle.
 * @param   pVCpu   VMCPU handle.
 * @param   rc      Current EM VBox status code.
 */
static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc)
{
    for (;;)
    {
        Log(("emR3Debug: rc=%Rrc\n", rc));
        const int rcLast = rc;

        /*
         * Debug related RC.
         */
        switch (rc)
        {
            /*
             * Single step an instruction.
             */
            case VINF_EM_DBG_STEP:
                if (   pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
                    || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
                    || pVCpu->em.s.fForceRAW /* paranoia */)
                    rc = emR3RawStep(pVM, pVCpu);
                else
                {
                    Assert(pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM);
                    rc = emR3RemStep(pVM, pVCpu);
                }
                break;

            /*
             * Simple events: stepped, breakpoint, stop/assertion.
             */
            case VINF_EM_DBG_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
                break;

            case VINF_EM_DBG_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
                break;

            case VINF_EM_DBG_STOP:
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
                break;

            case VINF_EM_DBG_HYPER_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
                break;

            case VINF_EM_DBG_HYPER_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
                break;

            case VINF_EM_DBG_HYPER_ASSERTION:
                RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                break;

            /*
             * Guru meditation.
             */
            case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
                break;
            case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
                break;

            default: /** @todo don't use default for guru, but make special errors code! */
                rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
                break;
        }

        /*
         * Process the result.
         */
        do
        {
            switch (rc)
            {
                /*
                 * Continue the debugging loop.
                 */
                case VINF_EM_DBG_STEP:
                case VINF_EM_DBG_STOP:
                case VINF_EM_DBG_STEPPED:
                case VINF_EM_DBG_BREAKPOINT:
                case VINF_EM_DBG_HYPER_STEPPED:
                case VINF_EM_DBG_HYPER_BREAKPOINT:
                case VINF_EM_DBG_HYPER_ASSERTION:
                    break;

                /*
                 * Resuming execution (in some form) has to be done here if we got
                 * a hypervisor debug event.
                 */
                case VINF_SUCCESS:
                case VINF_EM_RESUME:
                case VINF_EM_SUSPEND:
                case VINF_EM_RESCHEDULE:
                case VINF_EM_RESCHEDULE_RAW:
                case VINF_EM_RESCHEDULE_REM:
                case VINF_EM_HALT:
                    if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
                    {
                        rc = emR3RawResumeHyper(pVM, pVCpu);
                        if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
                            continue;
                    }
                    if (rc == VINF_SUCCESS)
                        rc = VINF_EM_RESCHEDULE;
                    return rc;

                /*
                 * The debugger isn't attached.
                 * We'll simply turn the thing off since that's the easiest thing to do.
                 */
                case VERR_DBGF_NOT_ATTACHED:
                    switch (rcLast)
                    {
                        case VINF_EM_DBG_HYPER_STEPPED:
                        case VINF_EM_DBG_HYPER_BREAKPOINT:
                        case VINF_EM_DBG_HYPER_ASSERTION:
                        case VERR_TRPM_PANIC:
                        case VERR_TRPM_DONT_PANIC:
                        case VERR_VMM_RING0_ASSERTION:
                        case VERR_VMM_HYPER_CR3_MISMATCH:
                        case VERR_VMM_RING3_CALL_DISABLED:
                            return rcLast;
                    }
                    return VINF_EM_OFF;

                /*
                 * Status codes terminating the VM in one or another sense.
                 */
                case VINF_EM_TERMINATE:
                case VINF_EM_OFF:
                case VINF_EM_RESET:
                case VINF_EM_NO_MEMORY:
                case VINF_EM_RAW_STALE_SELECTOR:
                case VINF_EM_RAW_IRET_TRAP:
                case VERR_TRPM_PANIC:
                case VERR_TRPM_DONT_PANIC:
                case VERR_VMM_RING0_ASSERTION:
                case VERR_VMM_HYPER_CR3_MISMATCH:
                case VERR_VMM_RING3_CALL_DISABLED:
                case VERR_INTERNAL_ERROR:
                case VERR_INTERNAL_ERROR_2:
                case VERR_INTERNAL_ERROR_3:
                case VERR_INTERNAL_ERROR_4:
                case VERR_INTERNAL_ERROR_5:
                case VERR_IPE_UNEXPECTED_STATUS:
                case VERR_IPE_UNEXPECTED_INFO_STATUS:
                case VERR_IPE_UNEXPECTED_ERROR_STATUS:
                    return rc;

                /*
                 * The rest is unexpected, and will keep us here.
                 */
                default:
                    AssertMsgFailed(("Unexpected rc %Rrc!\n", rc));
                    break;
            }
        } while (false);
    } /* debug for ever */
}
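
/*
 * Hedged sketch of the contract emR3Debug() implements above: a debug-related
 * status code is turned into a DBGF event, and whatever status the debugger
 * returns when it resumes the VM decides whether we loop, resume execution
 * or tear the VM down.
 */
#if 0
    rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);   /* hands control to the attached debugger */
    if (rc == VINF_SUCCESS)
        rc = VINF_EM_RESCHEDULE;                /* resume normal execution */
#endif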

/**
 * Steps recompiled code.
 *
 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
 *          VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM     VM handle.
 * @param   pVCpu   VMCPU handle.
 */
static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
{
    LogFlow(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    EMRemLock(pVM);

    /*
     * Switch to REM, step instruction, switch back.
     */
    int rc = REMR3State(pVM, pVCpu);
    if (RT_SUCCESS(rc))
    {
        rc = REMR3Step(pVM, pVCpu);
        REMR3StateBack(pVM, pVCpu);
    }
    EMRemUnlock(pVM);

    LogFlow(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
    return rc;
}


/**
 * Executes recompiled code.
 *
 * This function contains the recompiler version of the inner
 * execution loop (the outer loop being in EMR3ExecuteVM()).
 *
 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
 *          VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   pfFFDone    Where to store an indicator telling whether or not
 *                      FFs were done before returning.
 *
 */
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
{
#ifdef LOG_ENABLED
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
    uint32_t cpl = CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx));

    if (pCtx->eflags.Bits.u1VM)
        Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs, pCtx->eip, pCtx->eflags.Bits.u1IF));
    else
        Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x\n", cpl, pCtx->cs, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0));
#endif
    STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);

#if defined(VBOX_STRICT) && defined(DEBUG_bird)
    AssertMsg(   VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
              || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)),  /** @todo #1419 - get flat address. */
              ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
#endif

    /* Big lock, but you are not supposed to own any lock when coming in here. */
    EMRemLock(pVM);

    /*
     * Spin till we get a forced action which returns anything but VINF_SUCCESS
     * or the REM suggests raw-mode execution.
     */
    *pfFFDone = false;
    bool fInREMState = false;
    int rc = VINF_SUCCESS;

    /* Flush the recompiler TLB if the VCPU has changed. */
    if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
    {
        REMFlushTBs(pVM);
        /* Also sync the entire state. */
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    pVM->em.s.idLastRemCpu = pVCpu->idCpu;

    for (;;)
    {
        /*
         * Update REM state if not already in sync.
         */
        if (!fInREMState)
        {
            STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
            rc = REMR3State(pVM, pVCpu);
            STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
            if (RT_FAILURE(rc))
                break;
            fInREMState = true;

            /*
             * We might have missed the raising of VMREQ, TIMER and some other
             * important FFs while we were busy switching the state. So, check again.
             */
            if (    VM_FF_ISPENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_RESET)
                ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
            {
                LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
                goto l_REMDoForcedActions;
            }
        }


        /*
         * Execute REM.
         */
        STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
        rc = REMR3Run(pVM, pVCpu);
        STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);


        /*
         * Deal with high priority post execution FFs before doing anything else.
         */
        if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
            rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);

        /*
         * Process the returned status code.
         * (Try keep this short! Call functions!)
         */
        if (rc != VINF_SUCCESS)
        {
            if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
                break;
            if (rc != VINF_REM_INTERRUPED_FF)
            {
                /*
                 * Anything which is not known to us means an internal error
                 * and the termination of the VM!
                 */
                AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
                break;
            }
        }


        /*
         * Check and execute forced actions.
         * Sync back the VM state before calling any of these.
         */
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        TMTimerPollVoid(pVM, pVCpu);
#endif
        AssertCompile((VMCPU_FF_ALL_BUT_RAW_MASK & ~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE)) & VMCPU_FF_TIMER);
        if (    VM_FF_ISPENDING(pVM, VM_FF_ALL_BUT_RAW_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_BUT_RAW_MASK & ~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE)))
        {
l_REMDoForcedActions:
            if (fInREMState)
            {
                STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, d);
                REMR3StateBack(pVM, pVCpu);
                STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, d);
                fInREMState = false;
            }
            STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
            rc = emR3ForcedActions(pVM, pVCpu, rc);
            STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
            if (    rc != VINF_SUCCESS
                &&  rc != VINF_EM_RESCHEDULE_REM)
            {
                *pfFFDone = true;
                break;
            }
        }

    } /* The Inner Loop, recompiled execution mode version. */


    /*
     * Returning. Sync back the VM state if required.
     */
    if (fInREMState)
    {
        STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, e);
        REMR3StateBack(pVM, pVCpu);
        STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, e);
    }
    EMRemUnlock(pVM);

    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
    return rc;
}
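
/*
 * The REMR3State/REMR3StateBack pairing used above is the core pattern for
 * running the recompiler: sync the CPU state in, execute, and sync it back
 * before anyone else touches the context. A hedged, minimal sketch of that
 * pattern (error handling and forced actions omitted):
 */
#if 0
    EMRemLock(pVM);
    int rc = REMR3State(pVM, pVCpu);        /* CPUM -> REM */
    if (RT_SUCCESS(rc))
    {
        rc = REMR3Run(pVM, pVCpu);          /* execute recompiled code */
        REMR3StateBack(pVM, pVCpu);         /* REM -> CPUM */
    }
    EMRemUnlock(pVM);
#endif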


/**
 * Resumes executing hypervisor after a debug event.
 *
 * This is kind of special since our current guest state is
 * potentially out of sync.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   pVCpu   The VMCPU handle.
 */
static int emR3RawResumeHyper(PVM pVM, PVMCPU pVCpu)
{
    int rc;
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
    Assert(pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER);
    Log(("emR3RawResumeHyper: cs:eip=%RTsel:%RGr efl=%RGr\n", pCtx->cs, pCtx->eip, pCtx->eflags));

    /*
     * Resume execution.
     */
    CPUMRawEnter(pVCpu, NULL);
    CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) | X86_EFL_RF);
    rc = VMMR3ResumeHyper(pVM, pVCpu);
    Log(("emR3RawResumeHyper: cs:eip=%RTsel:%RGr efl=%RGr - returned from GC with rc=%Rrc\n", pCtx->cs, pCtx->eip, pCtx->eflags, rc));
    rc = CPUMRawLeave(pVCpu, NULL, rc);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);

    /*
     * Deal with the return code.
     */
    rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
    rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
    rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
    return rc;
}
1109
1110
1111/**
1112 * Steps rawmode.
1113 *
1114 * @returns VBox status code.
1115 * @param pVM The VM handle.
1116 * @param pVCpu The VMCPU handle.
1117 */
1118static int emR3RawStep(PVM pVM, PVMCPU pVCpu)
1119{
1120 Assert( pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
1121 || pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
1122 || pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM);
1123 int rc;
1124 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1125 bool fGuest = pVCpu->em.s.enmState != EMSTATE_DEBUG_HYPER;
1126#ifndef DEBUG_sandervl
1127 Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr\n", fGuest ? CPUMGetGuestCS(pVCpu) : CPUMGetHyperCS(pVCpu),
1128 fGuest ? CPUMGetGuestEIP(pVCpu) : CPUMGetHyperEIP(pVCpu), fGuest ? CPUMGetGuestEFlags(pVCpu) : CPUMGetHyperEFlags(pVCpu)));
1129#endif
1130 if (fGuest)
1131 {
1132 /*
1133 * Check vital forced actions, but ignore pending interrupts and timers.
1134 */
1135 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
1136 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
1137 {
1138 rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
1139 if (rc != VINF_SUCCESS)
1140 return rc;
1141 }
1142
1143 /*
1144 * Set flags for single stepping.
1145 */
1146 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) | X86_EFL_TF | X86_EFL_RF);
1147 }
1148 else
1149 CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) | X86_EFL_TF | X86_EFL_RF);
1150
1151 /*
1152 * Single step.
1153 * We do not start time or anything, if anything we should just do a few nanoseconds.
1154 */
1155 CPUMRawEnter(pVCpu, NULL);
1156 do
1157 {
1158 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
1159 rc = VMMR3ResumeHyper(pVM, pVCpu);
1160 else
1161 rc = VMMR3RawRunGC(pVM, pVCpu);
1162#ifndef DEBUG_sandervl
1163 Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr - GC rc %Rrc\n", fGuest ? CPUMGetGuestCS(pVCpu) : CPUMGetHyperCS(pVCpu),
1164 fGuest ? CPUMGetGuestEIP(pVCpu) : CPUMGetHyperEIP(pVCpu), fGuest ? CPUMGetGuestEFlags(pVCpu) : CPUMGetHyperEFlags(pVCpu), rc));
1165#endif
1166 } while ( rc == VINF_SUCCESS
1167 || rc == VINF_EM_RAW_INTERRUPT);
1168 rc = CPUMRawLeave(pVCpu, NULL, rc);
1169 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
1170
1171 /*
1172 * Make sure the trap flag is cleared.
1173 * (Too bad if the guest is trying to single step too.)
1174 */
1175 if (fGuest)
1176 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1177 else
1178 CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) & ~X86_EFL_TF);
1179
1180 /*
1181 * Deal with the return codes.
1182 */
1183 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
1184 rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
1185 rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
1186 return rc;
1187}
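
/*
 * The single-stepping recipe used by emR3RawStep above, in isolation. This is
 * an illustrative sketch only; a real caller must also run the forced-action
 * and status code funnels exactly as the function does:
 *
 *     CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) | X86_EFL_TF | X86_EFL_RF);
 *     int rc = VMMR3RawRunGC(pVM, pVCpu);   // loop on VINF_SUCCESS/VINF_EM_RAW_INTERRUPT
 *     CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
 */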
1188
1189
1190#ifdef DEBUG
1191
1192/**
1193 * Steps hardware accelerated mode.
1194 *
1195 * @returns VBox status code.
1196 * @param pVM The VM handle.
1197 * @param pVCpu The VMCPU handle.
1198 */
1199static int emR3HwAccStep(PVM pVM, PVMCPU pVCpu)
1200{
1201 Assert(pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC);
1202
1203 int rc;
1204 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1205 VMCPU_FF_CLEAR(pVCpu, (VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS));
1206
1207 /*
1208 * Check vital forced actions, but ignore pending interrupts and timers.
1209 */
1210 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
1211 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
1212 {
1213 rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
1214 if (rc != VINF_SUCCESS)
1215 return rc;
1216 }
1217 /*
1218 * Set flags for single stepping.
1219 */
1220 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) | X86_EFL_TF | X86_EFL_RF);
1221
1222 /*
1223 * Single step.
1224 * We do not start the clocks or anything; if anything, we should only run for a few nanoseconds.
1225 */
1226 do
1227 {
1228 rc = VMMR3HwAccRunGC(pVM, pVCpu);
1229 } while ( rc == VINF_SUCCESS
1230 || rc == VINF_EM_RAW_INTERRUPT);
1231 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
1232
1233 /*
1234 * Make sure the trap flag is cleared.
1235 * (Too bad if the guest is trying to single step too.)
1236 */
1237 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1238
1239 /*
1240 * Deal with the return codes.
1241 */
1242 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
1243 rc = emR3HwaccmHandleRC(pVM, pVCpu, pCtx, rc);
1244 rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
1245 return rc;
1246}
1247
1248
1249int emR3SingleStepExecRaw(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1250{
1251 int rc = VINF_SUCCESS;
1252 EMSTATE enmOldState = pVCpu->em.s.enmState;
1253 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
1254
1255 Log(("Single step BEGIN:\n"));
1256 for (uint32_t i = 0; i < cIterations; i++)
1257 {
1258 DBGFR3PrgStep(pVCpu);
1259 DBGFR3DisasInstrCurrentLog(pVCpu, "RSS: ");
1260 rc = emR3RawStep(pVM, pVCpu);
1261 if (rc != VINF_SUCCESS)
1262 break;
1263 }
1264 Log(("Single step END: rc=%Rrc\n", rc));
1265 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1266 pVCpu->em.s.enmState = enmOldState;
1267 return rc;
1268}
1269
1270
1271static int emR3SingleStepExecHwAcc(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1272{
1273 int rc = VINF_SUCCESS;
1274 EMSTATE enmOldState = pVCpu->em.s.enmState;
1275 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HWACC;
1276
1277 Log(("Single step BEGIN:\n"));
1278 for (uint32_t i = 0; i < cIterations; i++)
1279 {
1280 DBGFR3PrgStep(pVCpu);
1281 DBGFR3DisasInstrCurrentLog(pVCpu, "RSS: ");
1282 rc = emR3HwAccStep(pVM, pVCpu);
1283 if ( rc != VINF_SUCCESS
1284 || !HWACCMR3CanExecuteGuest(pVM, pVCpu->em.s.pCtx))
1285 break;
1286 }
1287 Log(("Single step END: rc=%Rrc\n", rc));
1288 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1289 pVCpu->em.s.enmState = enmOldState;
1290 return rc == VINF_SUCCESS ? VINF_EM_RESCHEDULE_REM : rc;
1291}
1292
1293
1294static int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1295{
1296 EMSTATE enmOldState = pVCpu->em.s.enmState;
1297
1298 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1299
1300 Log(("Single step BEGIN:\n"));
1301 for (uint32_t i = 0; i < cIterations; i++)
1302 {
1303 DBGFR3PrgStep(pVCpu);
1304 DBGFR3DisasInstrCurrentLog(pVCpu, "RSS: ");
1305 emR3RemStep(pVM, pVCpu);
1306 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1307 break;
1308 }
1309 Log(("Single step END:\n"));
1310 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1311 pVCpu->em.s.enmState = enmOldState;
1312 return VINF_EM_RESCHEDULE;
1313}
1314
1315#endif /* DEBUG */
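
/*
 * The three helpers above are only built into DEBUG builds. A caller drives
 * them with an iteration count, e.g. (sketch):
 *
 *     rc = emR3SingleStepExecRaw(pVM, pVCpu, 32);  // disassemble and step 32 instructions
 */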
1316
1317
1318/**
1319 * Executes one (or perhaps a few more) instruction(s).
1320 *
1321 * @returns VBox status code suitable for EM.
1322 *
1323 * @param pVM VM handle.
1324 * @param pVCpu VMCPU handle
1325 * @param rcGC GC return code
1326 * @param pszPrefix Disassembly prefix. If not NULL we'll disassemble the
1327 * instruction and prefix the log output with this text.
1328 */
1329#ifdef LOG_ENABLED
1330static int emR3RawExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcGC, const char *pszPrefix)
1331#else
1332static int emR3RawExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcGC)
1333#endif
1334{
1335 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1336 int rc;
1337
1338 /*
1339 *
1340 * The simple solution is to use the recompiler.
1341 * The better solution is to disassemble the current instruction and
1342 * try to handle as many cases as possible without using REM.
1343 *
1344 */
1345
1346#ifdef LOG_ENABLED
1347 /*
1348 * Disassemble the instruction if requested.
1349 */
1350 if (pszPrefix)
1351 {
1352 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
1353 DBGFR3DisasInstrCurrentLog(pVCpu, pszPrefix);
1354 }
1355#endif /* LOG_ENABLED */
1356
1357 /*
1358 * PATM is making life more interesting.
1359 * We cannot hand anything to REM which has an EIP inside patch code. So, we'll
1360 * tell PATM there is a trap in this code and have it take the appropriate actions
1361 * to allow us to execute the code in REM.
1362 */
1363 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
1364 {
1365 Log(("emR3RawExecuteInstruction: In patch block. eip=%RRv\n", (RTRCPTR)pCtx->eip));
1366
1367 RTGCPTR pNewEip;
1368 rc = PATMR3HandleTrap(pVM, pCtx, pCtx->eip, &pNewEip);
1369 switch (rc)
1370 {
1371 /*
1372 * It's not very useful to emulate a single instruction and then go back to raw
1373 * mode; just execute the whole block until IF is set again.
1374 */
1375 case VINF_SUCCESS:
1376 Log(("emR3RawExecuteInstruction: Executing instruction starting at new address %RGv IF=%d VMIF=%x\n",
1377 pNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
1378 pCtx->eip = pNewEip;
1379 Assert(pCtx->eip);
1380
1381 if (pCtx->eflags.Bits.u1IF)
1382 {
1383 /*
1384 * The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
1385 */
1386 Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
1387 return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
1388 }
1389 else if (rcGC == VINF_PATM_PENDING_IRQ_AFTER_IRET)
1390 {
1391 /* Special case: an iret that sets IF detected a pending irq/event. */
1392 return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIRET");
1393 }
1394 return VINF_EM_RESCHEDULE_REM;
1395
1396 /*
1397 * One instruction.
1398 */
1399 case VINF_PATCH_EMULATE_INSTR:
1400 Log(("emR3RawExecuteInstruction: Emulate patched instruction at %RGv IF=%d VMIF=%x\n",
1401 pNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
1402 pCtx->eip = pNewEip;
1403 return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
1404
1405 /*
1406 * The patch was disabled, hand it to the REM.
1407 */
1408 case VERR_PATCH_DISABLED:
1409 Log(("emR3RawExecuteInstruction: Disabled patch -> new eip %RGv IF=%d VMIF=%x\n",
1410 pNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
1411 pCtx->eip = pNewEip;
1412 if (pCtx->eflags.Bits.u1IF)
1413 {
1414 /*
1415 * The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
1416 */
1417 Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
1418 return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
1419 }
1420 return VINF_EM_RESCHEDULE_REM;
1421
1422 /* Force continued patch execution; usually due to a write-monitored stack. */
1423 case VINF_PATCH_CONTINUE:
1424 return VINF_SUCCESS;
1425
1426 default:
1427 AssertReleaseMsgFailed(("Unknown return code %Rrc from PATMR3HandleTrap\n", rc));
1428 return VERR_IPE_UNEXPECTED_STATUS;
1429 }
1430 }
1431
1432#if 0
1433 /* Try our own instruction emulator before falling back to the recompiler. */
1434 DISCPUSTATE Cpu;
1435 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "GEN EMU");
1436 if (RT_SUCCESS(rc))
1437 {
1438 uint32_t size;
1439
1440 switch (Cpu.pCurInstr->opcode)
1441 {
1442 /* @todo we can do more now */
1443 case OP_MOV:
1444 case OP_AND:
1445 case OP_OR:
1446 case OP_XOR:
1447 case OP_POP:
1448 case OP_INC:
1449 case OP_DEC:
1450 case OP_XCHG:
1451 STAM_PROFILE_START(&pVCpu->em.s.StatMiscEmu, a);
1452 rc = EMInterpretInstructionCPU(pVM, &Cpu, CPUMCTX2CORE(pCtx), 0, &size);
1453 if (RT_SUCCESS(rc))
1454 {
1455 pCtx->rip += Cpu.opsize;
1456#ifdef EM_NOTIFY_HWACCM
1457 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
1458 HWACCMR3NotifyEmulated(pVCpu);
1459#endif
1460 STAM_PROFILE_STOP(&pVCpu->em.s.StatMiscEmu, a);
1461 return rc;
1462 }
1463 if (rc != VERR_EM_INTERPRETER)
1464 AssertMsgFailedReturn(("rc=%Rrc\n", rc), rc);
1465 STAM_PROFILE_STOP(&pVCpu->em.s.StatMiscEmu, a);
1466 break;
1467 }
1468 }
1469#endif /* 0 */
1470 STAM_PROFILE_START(&pVCpu->em.s.StatREMEmu, a);
1471 Log(("EMINS: %04x:%RGv RSP=%RGv\n", pCtx->cs, (RTGCPTR)pCtx->rip, (RTGCPTR)pCtx->rsp));
1472 EMRemLock(pVM);
1473 /* Flush the recompiler TLB if the VCPU has changed. */
1474 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1475 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1476 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1477
1478 rc = REMR3EmulateInstruction(pVM, pVCpu);
1479 EMRemUnlock(pVM);
1480 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMEmu, a);
1481
1482#ifdef EM_NOTIFY_HWACCM
1483 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
1484 HWACCMR3NotifyEmulated(pVCpu);
1485#endif
1486 return rc;
1487}
1488
1489
1490/**
1491 * Executes one (or perhaps a few more) instruction(s).
1492 * This is just a wrapper for discarding pszPrefix in non-logging builds.
1493 *
1494 * @returns VBox status code suitable for EM.
1495 * @param pVM VM handle.
1496 * @param pVCpu VMCPU handle.
1497 * @param pszPrefix Disassembly prefix. If not NULL we'll disassemble the
1498 * instruction and prefix the log output with this text.
1499 * @param rcGC GC return code
1500 */
1501DECLINLINE(int) emR3RawExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC)
1502{
1503#ifdef LOG_ENABLED
1504 return emR3RawExecuteInstructionWorker(pVM, pVCpu, rcGC, pszPrefix);
1505#else
1506 return emR3RawExecuteInstructionWorker(pVM, pVCpu, rcGC);
1507#endif
1508}
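
/*
 * Note: the three-argument calls to emR3RawExecuteInstruction throughout this
 * file rely on the forward declaration supplying a default rcGC (VINF_SUCCESS),
 * e.g. (sketch):
 *
 *     rc = emR3RawExecuteInstruction(pVM, pVCpu, "PRIV: ");  // rcGC defaulted
 */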
1509
1510/**
1511 * Executes one (or perhaps a few more) IO instruction(s).
1512 *
1513 * @returns VBox status code suitable for EM.
1514 * @param pVM VM handle.
1515 * @param pVCpu VMCPU handle.
1516 */
1517static int emR3RawExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
1518{
1519 int rc;
1520 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1521
1522 STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);
1523
1524 /** @todo probably we should fall back to the recompiler; otherwise we'll go back and forth between HC & GC
1525 * as I/O instructions tend to come in packages of more than one
1526 */
1527 DISCPUSTATE Cpu;
1528 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "IO EMU");
1529 if (RT_SUCCESS(rc))
1530 {
1531 rc = VINF_EM_RAW_EMULATE_INSTR;
1532
1533 if (!(Cpu.prefix & (PREFIX_REP | PREFIX_REPNE)))
1534 {
1535 switch (Cpu.pCurInstr->opcode)
1536 {
1537 case OP_IN:
1538 {
1539 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
1540 rc = IOMInterpretIN(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1541 break;
1542 }
1543
1544 case OP_OUT:
1545 {
1546 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
1547 rc = IOMInterpretOUT(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1548 break;
1549 }
1550 }
1551 }
1552 else if (Cpu.prefix & PREFIX_REP)
1553 {
1554 switch (Cpu.pCurInstr->opcode)
1555 {
1556 case OP_INSB:
1557 case OP_INSWD:
1558 {
1559 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
1560 rc = IOMInterpretINS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1561 break;
1562 }
1563
1564 case OP_OUTSB:
1565 case OP_OUTSWD:
1566 {
1567 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
1568 rc = IOMInterpretOUTS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1569 break;
1570 }
1571 }
1572 }
1573
1574 /*
1575 * Handle the I/O return codes.
1576 * (The unhandled cases end up with rc == VINF_EM_RAW_EMULATE_INSTR.)
1577 */
1578 if (IOM_SUCCESS(rc))
1579 {
1580 pCtx->rip += Cpu.opsize;
1581 STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
1582 return rc;
1583 }
1584
1585 if (rc == VINF_EM_RAW_GUEST_TRAP)
1586 {
1587 STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
1588 rc = emR3RawGuestTrap(pVM, pVCpu);
1589 return rc;
1590 }
1591 AssertMsg(rc != VINF_TRPM_XCPT_DISPATCHED, ("Handle VINF_TRPM_XCPT_DISPATCHED\n"));
1592
1593 if (RT_FAILURE(rc))
1594 {
1595 STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
1596 return rc;
1597 }
1598 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || rc == VINF_EM_RESCHEDULE_REM, ("rc=%Rrc\n", rc));
1599 }
1600 STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
1601 return emR3RawExecuteInstruction(pVM, pVCpu, "IO: ");
1602}
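
/*
 * Note on the IOM_SUCCESS() check above: IOM may return informational statuses
 * that still count as success, so the instruction pointer is advanced for
 * those too before the status is propagated. Everything unhandled falls
 * through to the generic one-instruction REM emulation at the bottom.
 */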
1603
1604
1605/**
1606 * Handle a guest context trap.
1607 *
1608 * @returns VBox status code suitable for EM.
1609 * @param pVM VM handle.
1610 * @param pVCpu VMCPU handle.
1611 */
1612static int emR3RawGuestTrap(PVM pVM, PVMCPU pVCpu)
1613{
1614 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1615
1616 /*
1617 * Get the trap info.
1618 */
1619 uint8_t u8TrapNo;
1620 TRPMEVENT enmType;
1621 RTGCUINT uErrorCode;
1622 RTGCUINTPTR uCR2;
1623 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
1624 if (RT_FAILURE(rc))
1625 {
1626 AssertReleaseMsgFailed(("No trap! (rc=%Rrc)\n", rc));
1627 return rc;
1628 }
1629
1630 /*
1631 * Traps can be directly forwarded in hardware accelerated mode.
1632 */
1633 if (HWACCMIsEnabled(pVM))
1634 {
1635#ifdef LOG_ENABLED
1636 DBGFR3InfoLog(pVM, "cpumguest", "Guest trap");
1637 DBGFR3DisasInstrCurrentLog(pVCpu, "Guest trap");
1638#endif
1639 return VINF_EM_RESCHEDULE_HWACC;
1640 }
1641
1642#if 1 /* Experimental: Review, disable if it causes trouble. */
1643 /*
1644 * Handle traps in patch code first.
1645 *
1646 * We catch a few of these cases in RC before returning to R3 (#PF, #GP, #BP)
1647 * but several traps aren't handled specially by TRPM in RC and we end up here
1648 * instead. One example is #DE.
1649 */
1650 uint32_t uCpl = CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx));
1651 if ( uCpl == 0
1652 && PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip))
1653 {
1654 LogFlow(("emR3RawGuestTrap: trap %#x in patch code; eip=%08x\n", u8TrapNo, pCtx->eip));
1655 return emR3PatchTrap(pVM, pVCpu, pCtx, rc);
1656 }
1657#endif
1658
1659 /*
1660 * If the guest gate is marked unpatched, then we will check again if we can patch it.
1661 * (This assumes that we've already tried and failed to dispatch the trap in
1662 * RC for the gates that have already been patched. This is true for most high
1663 * volume traps, because these are handled specially, but not for odd ones like #DE.)
1664 */
1665 if (TRPMR3GetGuestTrapHandler(pVM, u8TrapNo) == TRPM_INVALID_HANDLER)
1666 {
1667 CSAMR3CheckGates(pVM, u8TrapNo, 1);
1668 Log(("emR3RawHandleRC: recheck gate %x -> valid=%d\n", u8TrapNo, TRPMR3GetGuestTrapHandler(pVM, u8TrapNo) != TRPM_INVALID_HANDLER));
1669
1670 /* If it was successful, then we could go back to raw mode. */
1671 if (TRPMR3GetGuestTrapHandler(pVM, u8TrapNo) != TRPM_INVALID_HANDLER)
1672 {
1673 /* Must check pending forced actions as our IDT or GDT might be out of sync. */
1674 rc = EMR3CheckRawForcedActions(pVM, pVCpu);
1675 AssertRCReturn(rc, rc);
1676
1677 TRPMERRORCODE enmError = uErrorCode != ~0U
1678 ? TRPM_TRAP_HAS_ERRORCODE
1679 : TRPM_TRAP_NO_ERRORCODE;
1680 rc = TRPMForwardTrap(pVCpu, CPUMCTX2CORE(pCtx), u8TrapNo, uErrorCode, enmError, TRPM_TRAP, -1);
1681 if (rc == VINF_SUCCESS /* Don't use RT_SUCCESS */)
1682 {
1683 TRPMResetTrap(pVCpu);
1684 return VINF_EM_RESCHEDULE_RAW;
1685 }
1686 AssertMsg(rc == VINF_EM_RAW_GUEST_TRAP, ("%Rrc\n", rc));
1687 }
1688 }
1689
1690 /*
1691 * Scan kernel code that traps; we might not get another chance.
1692 */
1693 /** @todo move this up before the dispatching? */
1694 if ( (pCtx->ss & X86_SEL_RPL) <= 1
1695 && !pCtx->eflags.Bits.u1VM)
1696 {
1697 Assert(!PATMIsPatchGCAddr(pVM, pCtx->eip));
1698 CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
1699 }
1700
1701 /*
1702 * Trap specific handling.
1703 */
1704 if (u8TrapNo == 6) /* (#UD) Invalid opcode. */
1705 {
1706 /*
1707 * If MONITOR & MWAIT are supported, then interpret them here.
1708 */
1709 DISCPUSTATE cpu;
1710 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &cpu, "Guest Trap (#UD): ");
1711 if ( RT_SUCCESS(rc)
1712 && (cpu.pCurInstr->opcode == OP_MONITOR || cpu.pCurInstr->opcode == OP_MWAIT))
1713 {
1714 uint32_t u32Dummy, u32Features, u32ExtFeatures;
1715 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Features);
1716 if (u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR)
1717 {
1718 rc = TRPMResetTrap(pVCpu);
1719 AssertRC(rc);
1720
1721 uint32_t opsize;
1722 rc = EMInterpretInstructionCPU(pVM, pVCpu, &cpu, CPUMCTX2CORE(pCtx), 0, &opsize);
1723 if (RT_SUCCESS(rc))
1724 {
1725 pCtx->rip += cpu.opsize;
1726#ifdef EM_NOTIFY_HWACCM
1727 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
1728 HWACCMR3NotifyEmulated(pVCpu);
1729#endif
1730 return rc;
1731 }
1732 return emR3RawExecuteInstruction(pVM, pVCpu, "Monitor: ");
1733 }
1734 }
1735 }
1736 else if (u8TrapNo == 13) /* (#GP) Privileged exception */
1737 {
1738 /*
1739 * Handle I/O bitmap?
1740 */
1741 /** @todo We're not supposed to be here with a false guest trap concerning
1742 * I/O access. We can easily handle those in RC. */
1743 DISCPUSTATE cpu;
1744 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &cpu, "Guest Trap: ");
1745 if ( RT_SUCCESS(rc)
1746 && (cpu.pCurInstr->optype & OPTYPE_PORTIO))
1747 {
1748 /*
1749 * We should really check the TSS for the I/O bitmap, but it's not like this
1750 * lazy approach really makes things worse.
1751 */
1752 rc = TRPMResetTrap(pVCpu);
1753 AssertRC(rc);
1754 return emR3RawExecuteInstruction(pVM, pVCpu, "IO Guest Trap: ");
1755 }
1756 }
1757
1758#ifdef LOG_ENABLED
1759 DBGFR3InfoLog(pVM, "cpumguest", "Guest trap");
1760 DBGFR3DisasInstrCurrentLog(pVCpu, "Guest trap");
1761
1762 /* Get guest page information. */
1763 uint64_t fFlags = 0;
1764 RTGCPHYS GCPhys = 0;
1765 int rc2 = PGMGstGetPage(pVCpu, uCR2, &fFlags, &GCPhys);
1766 Log(("emR3RawGuestTrap: cs:eip=%04x:%08x: trap=%02x err=%08x cr2=%08x cr0=%08x%s: Phys=%RGp fFlags=%08llx %s %s %s%s rc2=%d\n",
1767 pCtx->cs, pCtx->eip, u8TrapNo, uErrorCode, uCR2, (uint32_t)pCtx->cr0, (enmType == TRPM_SOFTWARE_INT) ? " software" : "", GCPhys, fFlags,
1768 fFlags & X86_PTE_P ? "P " : "NP", fFlags & X86_PTE_US ? "U" : "S",
1769 fFlags & X86_PTE_RW ? "RW" : "R0", fFlags & X86_PTE_G ? " G" : "", rc2));
1770#endif
1771
1772 /*
1773 * #PF has CR2.
1774 * (Because of stuff like above we must set CR2 in a delayed fashion.)
1775 */
1776 if (u8TrapNo == 14 /* #PF */)
1777 pCtx->cr2 = uCR2;
1778
1779 return VINF_EM_RESCHEDULE_REM;
1780}
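
/*
 * The gate forwarding path above in a nutshell (illustrative sketch mirroring
 * the code, with error handling elided):
 *
 *     CSAMR3CheckGates(pVM, u8TrapNo, 1);             // try to patch the gate
 *     rc = TRPMForwardTrap(pVCpu, CPUMCTX2CORE(pCtx), u8TrapNo, uErrorCode,
 *                          enmError, TRPM_TRAP, -1);
 *     if (rc == VINF_SUCCESS)                         // don't use RT_SUCCESS here
 *     {
 *         TRPMResetTrap(pVCpu);
 *         return VINF_EM_RESCHEDULE_RAW;              // trap went to the guest
 *     }
 */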
1781
1782
1783/**
1784 * Handle a ring switch trap.
1785 * We need to update statistics and install patches; execution then goes to REM.
1786 *
1787 * @returns VBox status code suitable for EM.
1788 * @param pVM VM handle.
1789 * @param pVCpu VMCPU handle.
1790 */
1791static int emR3RawRingSwitch(PVM pVM, PVMCPU pVCpu)
1792{
1793 int rc;
1794 DISCPUSTATE Cpu;
1795 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1796
1797 /*
1798 * sysenter, syscall & callgate
1799 */
1800 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "RSWITCH: ");
1801 if (RT_SUCCESS(rc))
1802 {
1803 if (Cpu.pCurInstr->opcode == OP_SYSENTER)
1804 {
1805 if (pCtx->SysEnter.cs != 0)
1806 {
1807 rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->eip),
1808 (SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT) ? PATMFL_CODE32 : 0);
1809 if (RT_SUCCESS(rc))
1810 {
1811 DBGFR3DisasInstrCurrentLog(pVCpu, "Patched sysenter instruction");
1812 return VINF_EM_RESCHEDULE_RAW;
1813 }
1814 }
1815 }
1816
1817#ifdef VBOX_WITH_STATISTICS
1818 switch (Cpu.pCurInstr->opcode)
1819 {
1820 case OP_SYSENTER:
1821 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatSysEnter);
1822 break;
1823 case OP_SYSEXIT:
1824 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatSysExit);
1825 break;
1826 case OP_SYSCALL:
1827 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatSysCall);
1828 break;
1829 case OP_SYSRET:
1830 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatSysRet);
1831 break;
1832 }
1833#endif
1834 }
1835 else
1836 AssertRC(rc);
1837
1838 /* Go to REM to emulate a single instruction. */
1839 return emR3RawExecuteInstruction(pVM, pVCpu, "RSWITCH: ");
1840}
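
/*
 * Patching the sysenter entry point above pays off because every guest system
 * call would otherwise bounce through the one-instruction REM emulation; once
 * the patch is installed we can stay in raw mode (VINF_EM_RESCHEDULE_RAW).
 */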
1841
1842
1843/**
1844 * Handle a trap (\#PF or \#GP) in patch code
1845 *
1846 * @returns VBox status code suitable for EM.
1847 * @param pVM VM handle.
1848 * @param pVCpu VMCPU handle.
1849 * @param pCtx CPU context
1850 * @param gcret GC return code
1851 */
1852static int emR3PatchTrap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int gcret)
1853{
1854 uint8_t u8TrapNo;
1855 int rc;
1856 TRPMEVENT enmType;
1857 RTGCUINT uErrorCode;
1858 RTGCUINTPTR uCR2;
1859
1860 Assert(PATMIsPatchGCAddr(pVM, pCtx->eip));
1861
1862 if (gcret == VINF_PATM_PATCH_INT3)
1863 {
1864 u8TrapNo = 3;
1865 uCR2 = 0;
1866 uErrorCode = 0;
1867 }
1868 else if (gcret == VINF_PATM_PATCH_TRAP_GP)
1869 {
1870 /* No active trap in this case. Kind of ugly. */
1871 u8TrapNo = X86_XCPT_GP;
1872 uCR2 = 0;
1873 uErrorCode = 0;
1874 }
1875 else
1876 {
1877 rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
1878 if (RT_FAILURE(rc))
1879 {
1880 AssertReleaseMsgFailed(("emR3PatchTrap: no trap! (rc=%Rrc) gcret=%Rrc\n", rc, gcret));
1881 return rc;
1882 }
1883 /* Reset the trap as we'll execute the original instruction again. */
1884 TRPMResetTrap(pVCpu);
1885 }
1886
1887 /*
1888 * Deal with traps inside patch code.
1889 * (This code won't run outside GC.)
1890 */
1891 if (u8TrapNo != 1)
1892 {
1893#ifdef LOG_ENABLED
1894 DBGFR3InfoLog(pVM, "cpumguest", "Trap in patch code");
1895 DBGFR3DisasInstrCurrentLog(pVCpu, "Patch code");
1896
1897 DISCPUSTATE Cpu;
1898 int rc;
1899
1900 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->eip, &Cpu, "Patch code: ");
1901 if ( RT_SUCCESS(rc)
1902 && Cpu.pCurInstr->opcode == OP_IRET)
1903 {
1904 uint32_t eip, selCS, uEFlags;
1905
1906 /* Iret crashes are bad as we have already changed the flags on the stack */
1907 rc = PGMPhysSimpleReadGCPtr(pVCpu, &eip, pCtx->esp, 4);
1908 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selCS, pCtx->esp+4, 4);
1909 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &uEFlags, pCtx->esp+8, 4);
1910 if (rc == VINF_SUCCESS)
1911 {
1912 if ( (uEFlags & X86_EFL_VM)
1913 || (selCS & X86_SEL_RPL) == 3)
1914 {
1915 uint32_t selSS, esp;
1916
1917 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &esp, pCtx->esp + 12, 4);
1918 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selSS, pCtx->esp + 16, 4);
1919
1920 if (uEFlags & X86_EFL_VM)
1921 {
1922 uint32_t selDS, selES, selFS, selGS;
1923 rc = PGMPhysSimpleReadGCPtr(pVCpu, &selES, pCtx->esp + 20, 4);
1924 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selDS, pCtx->esp + 24, 4);
1925 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selFS, pCtx->esp + 28, 4);
1926 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selGS, pCtx->esp + 32, 4);
1927 if (rc == VINF_SUCCESS)
1928 {
1929 Log(("Patch code: IRET->VM stack frame: return address %04X:%08RX32 eflags=%08x ss:esp=%04X:%08RX32\n", selCS, eip, uEFlags, selSS, esp));
1930 Log(("Patch code: IRET->VM stack frame: DS=%04X ES=%04X FS=%04X GS=%04X\n", selDS, selES, selFS, selGS));
1931 }
1932 }
1933 else
1934 Log(("Patch code: IRET stack frame: return address %04X:%08RX32 eflags=%08x ss:esp=%04X:%08RX32\n", selCS, eip, uEFlags, selSS, esp));
1935 }
1936 else
1937 Log(("Patch code: IRET stack frame: return address %04X:%08RX32 eflags=%08x\n", selCS, eip, uEFlags));
1938 }
1939 }
1940#endif /* LOG_ENABLED */
1941 Log(("emR3PatchTrap: in patch: eip=%08x: trap=%02x err=%08x cr2=%08x cr0=%08x\n",
1942 pCtx->eip, u8TrapNo, uErrorCode, uCR2, (uint32_t)pCtx->cr0));
1943
1944 RTGCPTR pNewEip;
1945 rc = PATMR3HandleTrap(pVM, pCtx, pCtx->eip, &pNewEip);
1946 switch (rc)
1947 {
1948 /*
1949 * Execute the faulting instruction.
1950 */
1951 case VINF_SUCCESS:
1952 {
1953 /** @todo execute a whole block */
1954 Log(("emR3PatchTrap: Executing faulting instruction at new address %RGv\n", pNewEip));
1955 if (!(pVCpu->em.s.pPatmGCState->uVMFlags & X86_EFL_IF))
1956 Log(("emR3PatchTrap: Virtual IF flag disabled!!\n"));
1957
1958 pCtx->eip = pNewEip;
1959 AssertRelease(pCtx->eip);
1960
1961 if (pCtx->eflags.Bits.u1IF)
1962 {
1963 /* Windows XP lets irets fault intentionally and then takes action based on the opcode; an
1964 * int3 patch overwrites it and leads to blue screens. Remove the patch in this case.
1965 */
1966 if ( u8TrapNo == X86_XCPT_GP
1967 && PATMIsInt3Patch(pVM, pCtx->eip, NULL, NULL))
1968 {
1969 /** @todo move to PATMR3HandleTrap */
1970 Log(("Possible Windows XP iret fault at %08RX32\n", pCtx->eip));
1971 PATMR3RemovePatch(pVM, pCtx->eip);
1972 }
1973
1974 /** @todo Knoppix 5 regression when returning VINF_SUCCESS here and going back to raw mode. */
1975 /* Note: possibly because a reschedule is required (e.g. iret to V86 code) */
1976
1977 return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
1978 /* Interrupts are enabled; just go back to the original instruction.
1979 return VINF_SUCCESS; */
1980 }
1981 return VINF_EM_RESCHEDULE_REM;
1982 }
1983
1984 /*
1985 * One instruction.
1986 */
1987 case VINF_PATCH_EMULATE_INSTR:
1988 Log(("emR3PatchTrap: Emulate patched instruction at %RGv IF=%d VMIF=%x\n",
1989 pNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
1990 pCtx->eip = pNewEip;
1991 AssertRelease(pCtx->eip);
1992 return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHEMUL: ");
1993
1994 /*
1995 * The patch was disabled, hand it to the REM.
1996 */
1997 case VERR_PATCH_DISABLED:
1998 if (!(pVCpu->em.s.pPatmGCState->uVMFlags & X86_EFL_IF))
1999 Log(("emR3PatchTrap: Virtual IF flag disabled!!\n"));
2000 pCtx->eip = pNewEip;
2001 AssertRelease(pCtx->eip);
2002
2003 if (pCtx->eflags.Bits.u1IF)
2004 {
2005 /*
2006 * The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
2007 */
2008 Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
2009 return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
2010 }
2011 return VINF_EM_RESCHEDULE_REM;
2012
2013 /* Force continued patch execution; usually due to a write-monitored stack. */
2014 case VINF_PATCH_CONTINUE:
2015 return VINF_SUCCESS;
2016
2017 /*
2018 * Anything else is *fatal*.
2019 */
2020 default:
2021 AssertReleaseMsgFailed(("Unknown return code %Rrc from PATMR3HandleTrap!\n", rc));
2022 return VERR_IPE_UNEXPECTED_STATUS;
2023 }
2024 }
2025 return VINF_SUCCESS;
2026}
2027
2028
2029/**
2030 * Handle a privileged instruction.
2031 *
2032 * @returns VBox status code suitable for EM.
2033 * @param pVM VM handle.
2034 * @param pVCpu VMCPU handle.
2035 */
2036static int emR3RawPrivileged(PVM pVM, PVMCPU pVCpu)
2037{
2038 STAM_PROFILE_START(&pVCpu->em.s.StatPrivEmu, a);
2039 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2040
2041 Assert(!pCtx->eflags.Bits.u1VM);
2042
2043 if (PATMIsEnabled(pVM))
2044 {
2045 /*
2046 * Check if in patch code.
2047 */
2048 if (PATMR3IsInsidePatchJump(pVM, pCtx->eip, NULL))
2049 {
2050#ifdef LOG_ENABLED
2051 DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
2052#endif
2053 AssertMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08X\n", pCtx->eip));
2054 return VERR_EM_RAW_PATCH_CONFLICT;
2055 }
2056 if ( (pCtx->ss & X86_SEL_RPL) == 0
2057 && !pCtx->eflags.Bits.u1VM
2058 && !PATMIsPatchGCAddr(pVM, pCtx->eip))
2059 {
2060 int rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->eip),
2061 (SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT) ? PATMFL_CODE32 : 0);
2062 if (RT_SUCCESS(rc))
2063 {
2064#ifdef LOG_ENABLED
2065 DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
2066#endif
2067 DBGFR3DisasInstrCurrentLog(pVCpu, "Patched privileged instruction");
2068 return VINF_SUCCESS;
2069 }
2070 }
2071 }
2072
2073#ifdef LOG_ENABLED
2074 if (!PATMIsPatchGCAddr(pVM, pCtx->eip))
2075 {
2076 DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
2077 DBGFR3DisasInstrCurrentLog(pVCpu, "Privileged instr: ");
2078 }
2079#endif
2080
2081 /*
2082 * Instruction statistics and logging.
2083 */
2084 DISCPUSTATE Cpu;
2085 int rc;
2086
2087 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "PRIV: ");
2088 if (RT_SUCCESS(rc))
2089 {
2090#ifdef VBOX_WITH_STATISTICS
2091 PEMSTATS pStats = pVCpu->em.s.CTX_SUFF(pStats);
2092 switch (Cpu.pCurInstr->opcode)
2093 {
2094 case OP_INVLPG:
2095 STAM_COUNTER_INC(&pStats->StatInvlpg);
2096 break;
2097 case OP_IRET:
2098 STAM_COUNTER_INC(&pStats->StatIret);
2099 break;
2100 case OP_CLI:
2101 STAM_COUNTER_INC(&pStats->StatCli);
2102 emR3RecordCli(pVM, pVCpu, pCtx->rip);
2103 break;
2104 case OP_STI:
2105 STAM_COUNTER_INC(&pStats->StatSti);
2106 break;
2107 case OP_INSB:
2108 case OP_INSWD:
2109 case OP_IN:
2110 case OP_OUTSB:
2111 case OP_OUTSWD:
2112 case OP_OUT:
2113 AssertMsgFailed(("Unexpected privileged exception due to port IO\n"));
2114 break;
2115
2116 case OP_MOV_CR:
2117 if (Cpu.param1.flags & USE_REG_GEN32)
2118 {
2119 //read
2120 Assert(Cpu.param2.flags & USE_REG_CR);
2121 Assert(Cpu.param2.base.reg_ctrl <= USE_REG_CR4);
2122 STAM_COUNTER_INC(&pStats->StatMovReadCR[Cpu.param2.base.reg_ctrl]);
2123 }
2124 else
2125 {
2126 //write
2127 Assert(Cpu.param1.flags & USE_REG_CR);
2128 Assert(Cpu.param1.base.reg_ctrl <= USE_REG_CR4);
2129 STAM_COUNTER_INC(&pStats->StatMovWriteCR[Cpu.param1.base.reg_ctrl]);
2130 }
2131 break;
2132
2133 case OP_MOV_DR:
2134 STAM_COUNTER_INC(&pStats->StatMovDRx);
2135 break;
2136 case OP_LLDT:
2137 STAM_COUNTER_INC(&pStats->StatMovLldt);
2138 break;
2139 case OP_LIDT:
2140 STAM_COUNTER_INC(&pStats->StatMovLidt);
2141 break;
2142 case OP_LGDT:
2143 STAM_COUNTER_INC(&pStats->StatMovLgdt);
2144 break;
2145 case OP_SYSENTER:
2146 STAM_COUNTER_INC(&pStats->StatSysEnter);
2147 break;
2148 case OP_SYSEXIT:
2149 STAM_COUNTER_INC(&pStats->StatSysExit);
2150 break;
2151 case OP_SYSCALL:
2152 STAM_COUNTER_INC(&pStats->StatSysCall);
2153 break;
2154 case OP_SYSRET:
2155 STAM_COUNTER_INC(&pStats->StatSysRet);
2156 break;
2157 case OP_HLT:
2158 STAM_COUNTER_INC(&pStats->StatHlt);
2159 break;
2160 default:
2161 STAM_COUNTER_INC(&pStats->StatMisc);
2162 Log4(("emR3RawPrivileged: opcode=%d\n", Cpu.pCurInstr->opcode));
2163 break;
2164 }
2165#endif /* VBOX_WITH_STATISTICS */
2166 if ( (pCtx->ss & X86_SEL_RPL) == 0
2167 && !pCtx->eflags.Bits.u1VM
2168 && SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT)
2169 {
2170 uint32_t size;
2171
2172 STAM_PROFILE_START(&pVCpu->em.s.StatPrivEmu, a);
2173 switch (Cpu.pCurInstr->opcode)
2174 {
2175 case OP_CLI:
2176 pCtx->eflags.u32 &= ~X86_EFL_IF;
2177 Assert(Cpu.opsize == 1);
2178 pCtx->rip += Cpu.opsize;
2179 STAM_PROFILE_STOP(&pVCpu->em.s.StatPrivEmu, a);
2180 return VINF_EM_RESCHEDULE_REM; /* must go to the recompiler now! */
2181
2182 case OP_STI:
2183 pCtx->eflags.u32 |= X86_EFL_IF;
2184 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip + Cpu.opsize);
2185 Assert(Cpu.opsize == 1);
2186 pCtx->rip += Cpu.opsize;
2187 STAM_PROFILE_STOP(&pVCpu->em.s.StatPrivEmu, a);
2188 return VINF_SUCCESS;
2189
2190 case OP_HLT:
2191 if (PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip))
2192 {
2193 PATMTRANSSTATE enmState;
2194 RTGCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtx->eip, &enmState);
2195
2196 if (enmState == PATMTRANS_OVERWRITTEN)
2197 {
2198 rc = PATMR3DetectConflict(pVM, pOrgInstrGC, pOrgInstrGC);
2199 Assert(rc == VERR_PATCH_DISABLED);
2200 /* Conflict detected, patch disabled */
2201 Log(("emR3RawPrivileged: detected conflict -> disabled patch at %08RX32\n", pCtx->eip));
2202
2203 enmState = PATMTRANS_SAFE;
2204 }
2205
2206 /* The translation had better be successful. Otherwise we can't recover. */
2207 AssertReleaseMsg(pOrgInstrGC && enmState != PATMTRANS_OVERWRITTEN, ("Unable to translate instruction address at %08RX32\n", pCtx->eip));
2208 if (enmState != PATMTRANS_OVERWRITTEN)
2209 pCtx->eip = pOrgInstrGC;
2210 }
2211 /* no break; we could just return VINF_EM_HALT here */
2212
2213 case OP_MOV_CR:
2214 case OP_MOV_DR:
2215#ifdef LOG_ENABLED
2216 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
2217 {
2218 DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
2219 DBGFR3DisasInstrCurrentLog(pVCpu, "Privileged instr: ");
2220 }
2221#endif
2222
2223 rc = EMInterpretInstructionCPU(pVM, pVCpu, &Cpu, CPUMCTX2CORE(pCtx), 0, &size);
2224 if (RT_SUCCESS(rc))
2225 {
2226 pCtx->rip += Cpu.opsize;
2227#ifdef EM_NOTIFY_HWACCM
2228 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
2229 HWACCMR3NotifyEmulated(pVCpu);
2230#endif
2231 STAM_PROFILE_STOP(&pVCpu->em.s.StatPrivEmu, a);
2232
2233 if ( Cpu.pCurInstr->opcode == OP_MOV_CR
2234 && Cpu.param1.flags == USE_REG_CR /* write */
2235 )
2236 {
2237 /* Deal with CR0 updates inside patch code that force
2238 * us to go to the recompiler.
2239 */
2240 if ( PATMIsPatchGCAddr(pVM, pCtx->rip)
2241 && (pCtx->cr0 & (X86_CR0_WP|X86_CR0_PG|X86_CR0_PE)) != (X86_CR0_WP|X86_CR0_PG|X86_CR0_PE))
2242 {
2243 PATMTRANSSTATE enmState;
2244 RTGCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtx->rip, &enmState);
2245
2246 Log(("Force recompiler switch due to cr0 (%RGp) update rip=%RGv -> %RGv (enmState=%d)\n", pCtx->cr0, pCtx->rip, pOrgInstrGC, enmState));
2247 if (enmState == PATMTRANS_OVERWRITTEN)
2248 {
2249 rc = PATMR3DetectConflict(pVM, pOrgInstrGC, pOrgInstrGC);
2250 Assert(rc == VERR_PATCH_DISABLED);
2251 /* Conflict detected, patch disabled */
2252 Log(("emR3RawPrivileged: detected conflict -> disabled patch at %RGv\n", (RTGCPTR)pCtx->rip));
2253 enmState = PATMTRANS_SAFE;
2254 }
2255 /* The translation had better be successful. Otherwise we can't recover. */
2256 AssertReleaseMsg(pOrgInstrGC && enmState != PATMTRANS_OVERWRITTEN, ("Unable to translate instruction address at %RGv\n", (RTGCPTR)pCtx->rip));
2257 if (enmState != PATMTRANS_OVERWRITTEN)
2258 pCtx->rip = pOrgInstrGC;
2259 }
2260
2261 /* Reschedule is necessary as the execution/paging mode might have changed. */
2262 return VINF_EM_RESCHEDULE;
2263 }
2264 return rc; /* can return VINF_EM_HALT as well. */
2265 }
2266 AssertMsgReturn(rc == VERR_EM_INTERPRETER, ("%Rrc\n", rc), rc);
2267 break; /* fall back to the recompiler */
2268 }
2269 STAM_PROFILE_STOP(&pVCpu->em.s.StatPrivEmu, a);
2270 }
2271 }
2272
2273 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
2274 return emR3PatchTrap(pVM, pVCpu, pCtx, VINF_PATM_PATCH_TRAP_GP);
2275
2276 return emR3RawExecuteInstruction(pVM, pVCpu, "PRIV");
2277}
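
/*
 * The cli/sti fast paths above, in isolation (illustrative sketch; both
 * instructions are a single byte):
 *
 *     pCtx->eflags.u32 &= ~X86_EFL_IF;                 // cli
 *     pCtx->rip += 1;
 *
 *     pCtx->eflags.u32 |= X86_EFL_IF;                  // sti
 *     EMSetInhibitInterruptsPC(pVCpu, pCtx->rip + 1);  // model the one
 *     pCtx->rip += 1;                                  // instruction irq shadow
 */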
2278
2279
2280/**
2281 * Update the forced rawmode execution modifier.
2282 *
2283 * This function is called when we're returning from the raw-mode loop(s). If we're
2284 * in patch code, it will set a flag forcing execution to be resumed in raw mode;
2285 * if not in patch code, the flag will be cleared.
2286 *
2287 * We should never interrupt patch code while it's being executed. Cli patches can
2288 * contain big code blocks, but they are always executed with IF=0. Other patches
2289 * replace single instructions and should be atomic.
2290 *
2291 * @returns Updated rc.
2292 *
2293 * @param pVM The VM handle.
2294 * @param pVCpu The VMCPU handle.
2295 * @param pCtx The guest CPU context.
2296 * @param rc The result code.
2297 */
2298DECLINLINE(int) emR3RawUpdateForceFlag(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
2299{
2300 if (PATMIsPatchGCAddr(pVM, pCtx->eip)) /** @todo check cs selector base/type */
2301 {
2302 /* ignore reschedule attempts. */
2303 switch (rc)
2304 {
2305 case VINF_EM_RESCHEDULE:
2306 case VINF_EM_RESCHEDULE_REM:
2307 LogFlow(("emR3RawUpdateForceFlag: patch address -> force raw reschedule\n"));
2308 rc = VINF_SUCCESS;
2309 break;
2310 }
2311 pVCpu->em.s.fForceRAW = true;
2312 }
2313 else
2314 pVCpu->em.s.fForceRAW = false;
2315 return rc;
2316}
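
/*
 * Example of the effect: returning from GC at a patched address with
 * rc = VINF_EM_RESCHEDULE_REM yields rc = VINF_SUCCESS and fForceRAW = true,
 * so the raw-mode loop spins once more and the patch block completes
 * without interruption.
 */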
2317
2318
2319/**
2320 * Check for pending raw actions.
2321 *
2322 * @returns VBox status code. May return VINF_EM_NO_MEMORY but none of the other
2323 * EM statuses.
2324 * @param pVM The VM to operate on.
2325 * @param pVCpu The VMCPU handle.
2326 */
2327VMMR3DECL(int) EMR3CheckRawForcedActions(PVM pVM, PVMCPU pVCpu)
2328{
2329 return emR3RawForcedActions(pVM, pVCpu, pVCpu->em.s.pCtx);
2330}
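
/*
 * Typical use (see the gate rechecking in emR3RawGuestTrap above): flush any
 * pending selector/IDT syncs before trusting those tables, e.g. (sketch):
 *
 *     rc = EMR3CheckRawForcedActions(pVM, pVCpu);
 *     AssertRCReturn(rc, rc);
 */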
2331
2332
2333/**
2334 * Process raw-mode specific forced actions.
2335 *
2336 * This function is called when any of the FFs in VM_FF_HIGH_PRIORITY_PRE_RAW_MASK are pending.
2337 *
2338 * @returns VBox status code. May return VINF_EM_NO_MEMORY but none of the other
2339 * EM statuses.
2340 * @param pVM The VM handle.
2341 * @param pVCpu The VMCPU handle.
2342 * @param pCtx The guest CPUM register context.
2343 */
2344static int emR3RawForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2345{
2346 /*
2347 * Note that the order is *vitally* important!
2348 * Also note that SELMR3UpdateFromCPUM may trigger VM_FF_SELM_SYNC_TSS.
2349 */
2350
2351
2352 /*
2353 * Sync selector tables.
2354 */
2355 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT))
2356 {
2357 int rc = SELMR3UpdateFromCPUM(pVM, pVCpu);
2358 if (RT_FAILURE(rc))
2359 return rc;
2360 }
2361
2362 /*
2363 * Sync IDT.
2364 *
2365 * The CSAMR3CheckGates call in TRPMR3SyncIDT may call PGMPrefetchPage
2366 * and PGMShwModifyPage, so we're in for trouble if for instance a
2367 * PGMSyncCR3+pgmPoolClearAll is pending.
2368 */
2369 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TRPM_SYNC_IDT))
2370 {
2371 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3)
2372 && EMIsRawRing0Enabled(pVM)
2373 && CSAMIsEnabled(pVM))
2374 {
2375 int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
2376 if (RT_FAILURE(rc))
2377 return rc;
2378 }
2379
2380 int rc = TRPMR3SyncIDT(pVM, pVCpu);
2381 if (RT_FAILURE(rc))
2382 return rc;
2383 }
2384
2385 /*
2386 * Sync TSS.
2387 */
2388 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
2389 {
2390 int rc = SELMR3SyncTSS(pVM, pVCpu);
2391 if (RT_FAILURE(rc))
2392 return rc;
2393 }
2394
2395 /*
2396 * Sync page directory.
2397 */
2398 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
2399 {
2400 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
2401 int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
2402 if (RT_FAILURE(rc))
2403 return rc;
2404
2405 Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
2406
2407 /* Prefetch pages for EIP and ESP. */
2408 /** @todo This is rather expensive. Should investigate if it really helps at all. */
2409 rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->rip));
2410 if (rc == VINF_SUCCESS)
2411 rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DIS_SELREG_SS, CPUMCTX2CORE(pCtx), pCtx->rsp));
2412 if (rc != VINF_SUCCESS)
2413 {
2414 if (rc != VINF_PGM_SYNC_CR3)
2415 {
2416 AssertLogRelMsgReturn(RT_FAILURE(rc), ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
2417 return rc;
2418 }
2419 rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
2420 if (RT_FAILURE(rc))
2421 return rc;
2422 }
2423 /** @todo maybe prefetch the supervisor stack page as well */
2424 Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
2425 }
2426
2427 /*
2428 * Allocate handy pages (just in case the above actions have consumed some pages).
2429 */
2430 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2431 {
2432 int rc = PGMR3PhysAllocateHandyPages(pVM);
2433 if (RT_FAILURE(rc))
2434 return rc;
2435 }
2436
2437 /*
2438 * Check whether we're out of memory now.
2439 *
2440 * This may stem from some of the above actions or from operations that have been
2441 * executed since we last ran the FFs. The handy page allocation, for instance, must
2442 * always be followed by this check.
2443 */
2444 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
2445 return VINF_EM_NO_MEMORY;
2446
2447 return VINF_SUCCESS;
2448}
2449
2450
2451/**
2452 * Executes raw code.
2453 *
2454 * This function contains the raw-mode version of the inner
2455 * execution loop (the outer loop being in EMR3ExecuteVM()).
2456 *
2457 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
2458 * VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
2459 *
2460 * @param pVM VM handle.
2461 * @param pVCpu VMCPU handle.
2462 * @param pfFFDone Where to store an indicator telling whether or not
2463 * FFs were done before returning.
2464 */
2465static int emR3RawExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
2466{
2467 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatRAWTotal, a);
2468
2469 int rc = VERR_INTERNAL_ERROR;
2470 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2471 LogFlow(("emR3RawExecute: (cs:eip=%04x:%08x)\n", pCtx->cs, pCtx->eip));
2472 pVCpu->em.s.fForceRAW = false;
2473 *pfFFDone = false;
2474
2475
2476 /*
2477 *
2478 * Spin till we get a forced action or raw mode status code resulting in
2479 * anything but VINF_SUCCESS or VINF_EM_RESCHEDULE_RAW.
2480 *
2481 */
2482 for (;;)
2483 {
2484 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatRAWEntry, b);
2485
2486 /*
2487 * Check various preconditions.
2488 */
2489#ifdef VBOX_STRICT
2490 Assert(REMR3QueryPendingInterrupt(pVM, pVCpu) == REM_NO_PENDING_IRQ);
2491 Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss & X86_SEL_RPL) == 3 || (pCtx->ss & X86_SEL_RPL) == 0);
2492 AssertMsg( (pCtx->eflags.u32 & X86_EFL_IF)
2493 || PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip),
2494 ("Tried to execute code with IF at EIP=%08x!\n", pCtx->eip));
2495 if ( !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
2496 && PGMMapHasConflicts(pVM))
2497 {
2498 PGMMapCheck(pVM);
2499 AssertMsgFailed(("We should not get conflicts any longer!!!\n"));
2500 return VERR_INTERNAL_ERROR;
2501 }
2502#endif /* VBOX_STRICT */
2503
2504 /*
2505 * Process high priority pre-execution raw-mode FFs.
2506 */
2507 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
2508 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
2509 {
2510 rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
2511 if (rc != VINF_SUCCESS)
2512 break;
2513 }
2514
2515 /*
2516 * If we're going to execute ring-0 code, the guest state needs to
2517 * be modified a bit and some of the state components (IF, SS/CS RPL,
2518 * and perhaps EIP) needs to be stored with PATM.
2519 */
2520 rc = CPUMRawEnter(pVCpu, NULL);
2521 if (rc != VINF_SUCCESS)
2522 {
2523 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWEntry, b);
2524 break;
2525 }
2526
2527 /*
2528 * Scan code before executing it. Don't bother with user mode or V86 code.
2529 */
2530 if ( (pCtx->ss & X86_SEL_RPL) <= 1
2531 && !pCtx->eflags.Bits.u1VM
2532 && !PATMIsPatchGCAddr(pVM, pCtx->eip))
2533 {
2534 STAM_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatRAWEntry, b);
2535 CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
2536 STAM_PROFILE_ADV_RESUME(&pVCpu->em.s.StatRAWEntry, b);
2537 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
2538 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
2539 {
2540 rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
2541 if (rc != VINF_SUCCESS)
2542 {
2543 rc = CPUMRawLeave(pVCpu, NULL, rc);
2544 break;
2545 }
2546 }
2547 }
2548
2549#ifdef LOG_ENABLED
2550 /*
2551 * Log important stuff before entering GC.
2552 */
2553 PPATMGCSTATE pGCState = PATMR3QueryGCStateHC(pVM);
2554 if (pCtx->eflags.Bits.u1VM)
2555 Log(("RV86: %04X:%08X IF=%d VMFlags=%x\n", pCtx->cs, pCtx->eip, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
2556 else if ((pCtx->ss & X86_SEL_RPL) == 1)
2557 {
2558 bool fCSAMScanned = CSAMIsPageScanned(pVM, (RTGCPTR)pCtx->eip);
2559 Log(("RR0: %08X ESP=%08X IF=%d VMFlags=%x PIF=%d CPL=%d (Scanned=%d)\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss & X86_SEL_RPL), fCSAMScanned));
2560 }
2561 else if ((pCtx->ss & X86_SEL_RPL) == 3)
2562 Log(("RR3: %08X ESP=%08X IF=%d VMFlags=%x\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
2563#endif /* LOG_ENABLED */
2564
2565
2566
2567 /*
2568 * Execute the code.
2569 */
2570 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWEntry, b);
2571 STAM_PROFILE_START(&pVCpu->em.s.StatRAWExec, c);
2572 rc = VMMR3RawRunGC(pVM, pVCpu);
2573 STAM_PROFILE_STOP(&pVCpu->em.s.StatRAWExec, c);
2574 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatRAWTail, d);
2575
2576 LogFlow(("RR0-E: %08X ESP=%08X IF=%d VMFlags=%x PIF=%d CPL=%d\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss & X86_SEL_RPL)));
2577 LogFlow(("VMMR3RawRunGC returned %Rrc\n", rc));
2578
2579
2580
2581 /*
2582 * Restore the real CPU state and deal with high priority post
2583 * execution FFs before doing anything else.
2584 */
2585 rc = CPUMRawLeave(pVCpu, NULL, rc);
2586 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
2587 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
2588 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
2589 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
2590
2591#ifdef VBOX_STRICT
2592 /*
2593 * Assert TSS consistency & rc vs patch code.
2594 */
2595 if ( !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_SELM_SYNC_GDT) /* GDT implies TSS at the moment. */
2596 && EMIsRawRing0Enabled(pVM))
2597 SELMR3CheckTSS(pVM);
2598 switch (rc)
2599 {
2600 case VINF_SUCCESS:
2601 case VINF_EM_RAW_INTERRUPT:
2602 case VINF_PATM_PATCH_TRAP_PF:
2603 case VINF_PATM_PATCH_TRAP_GP:
2604 case VINF_PATM_PATCH_INT3:
2605 case VINF_PATM_CHECK_PATCH_PAGE:
2606 case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
2607 case VINF_EM_RAW_GUEST_TRAP:
2608 case VINF_EM_RESCHEDULE_RAW:
2609 break;
2610
2611 default:
2612 if (PATMIsPatchGCAddr(pVM, pCtx->eip) && !(pCtx->eflags.u32 & X86_EFL_TF))
2613 LogIt(NULL, 0, LOG_GROUP_PATM, ("Patch code interrupted at %RRv for reason %Rrc\n", (RTRCPTR)CPUMGetGuestEIP(pVCpu), rc));
2614 break;
2615 }
2616 /*
2617 * Let's go paranoid!
2618 */
2619 if ( !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
2620 && PGMMapHasConflicts(pVM))
2621 {
2622 PGMMapCheck(pVM);
2623 AssertMsgFailed(("We should not get conflicts any longer!!! rc=%Rrc\n", rc));
2624 return VERR_INTERNAL_ERROR;
2625 }
2626#endif /* VBOX_STRICT */
2627
2628 /*
2629 * Process the returned status code.
2630 */
2631 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
2632 {
2633 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWTail, d);
2634 break;
2635 }
2636 rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
2637 if (rc != VINF_SUCCESS)
2638 {
2639 rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
2640 if (rc != VINF_SUCCESS)
2641 {
2642 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWTail, d);
2643 break;
2644 }
2645 }
2646
2647 /*
2648 * Check and execute forced actions.
2649 */
2650#ifdef VBOX_HIGH_RES_TIMERS_HACK
2651 TMTimerPollVoid(pVM, pVCpu);
2652#endif
2653 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWTail, d);
2654 if ( VM_FF_ISPENDING(pVM, ~VM_FF_HIGH_PRIORITY_PRE_RAW_MASK | VM_FF_PGM_NO_MEMORY)
2655 || VMCPU_FF_ISPENDING(pVCpu, ~VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
2656 {
2657 Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss & X86_SEL_RPL) != 1);
2658
2659 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatRAWTotal, a);
2660 rc = emR3ForcedActions(pVM, pVCpu, rc);
2661 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatRAWTotal, a);
2662 if ( rc != VINF_SUCCESS
2663 && rc != VINF_EM_RESCHEDULE_RAW)
2664 {
2665 rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
2666 if (rc != VINF_SUCCESS)
2667 {
2668 *pfFFDone = true;
2669 break;
2670 }
2671 }
2672 }
2673 }
2674
2675 /*
2676 * Return to outer loop.
2677 */
2678#if defined(LOG_ENABLED) && defined(DEBUG)
2679 RTLogFlush(NULL);
2680#endif
2681 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWTotal, a);
2682 return rc;
2683}
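
/*
 * Note the status code convention used by the loop above: anything in the
 * VINF_EM_FIRST..VINF_EM_LAST range is a scheduling or termination request
 * for the outer loop in EMR3ExecuteVM() and terminates the inner loop at once.
 */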
2684
2685
2686/**
2687 * Executes hardware accelerated raw code. (Intel VMX & AMD SVM)
2688 *
2689 * This function contains the hardware accelerated version of the inner
2690 * execution loop (the outer loop being in EMR3ExecuteVM()).
2691 *
2692 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE, VINF_EM_RESCHEDULE_RAW,
2693 * VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
2694 *
2695 * @param pVM VM handle.
2696 * @param pVCpu VMCPU handle.
2697 * @param pfFFDone Where to store an indicator telling whether or not
2698 * FFs were done before returning.
2699 */
2700static int emR3HwAccExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
2701{
2702 int rc = VERR_INTERNAL_ERROR;
2703 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2704
2705 LogFlow(("emR3HwAccExecute%d: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pCtx->cs, (RTGCPTR)pCtx->rip));
2706 *pfFFDone = false;
2707
2708 STAM_COUNTER_INC(&pVCpu->em.s.StatHwAccExecuteEntry);
2709
2710#ifdef EM_NOTIFY_HWACCM
2711 HWACCMR3NotifyScheduled(pVCpu);
2712#endif
2713
2714 /*
2715 * Spin till we get a forced action which returns anything but VINF_SUCCESS.
2716 */
2717 for (;;)
2718 {
2719 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatHwAccEntry, a);
2720
2721 /*
2722 * Process high priority pre-execution raw-mode FFs.
2723 */
2724 VMCPU_FF_CLEAR(pVCpu, (VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS)); /* not relevant in HWACCM mode; shouldn't be set really. */
2725 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
2726 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
2727 {
2728 rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
2729 if (rc != VINF_SUCCESS)
2730 break;
2731 }
2732
2733#ifdef LOG_ENABLED
2734 /*
2735 * Log important stuff before entering GC.
2736 */
2737 if (TRPMHasTrap(pVCpu))
2738 Log(("CPU%d: Pending hardware interrupt=0x%x cs:rip=%04X:%RGv\n", pVCpu->idCpu, TRPMGetTrapNo(pVCpu), pCtx->cs, (RTGCPTR)pCtx->rip));
2739
2740 uint32_t cpl = CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx));
2741
2742 if (pVM->cCPUs == 1)
2743 {
2744 if (pCtx->eflags.Bits.u1VM)
2745 Log(("HWV86: %08X IF=%d\n", pCtx->eip, pCtx->eflags.Bits.u1IF));
2746 else if (CPUMIsGuestIn64BitCodeEx(pCtx))
2747 Log(("HWR%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
2748 else
2749 Log(("HWR%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
2750 }
2751 else
2752 {
2753 if (pCtx->eflags.Bits.u1VM)
2754 Log(("HWV86-CPU%d: %08X IF=%d\n", pVCpu->idCpu, pCtx->eip, pCtx->eflags.Bits.u1IF));
2755 else if (CPUMIsGuestIn64BitCodeEx(pCtx))
2756 Log(("HWR%d-CPU%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
2757 else
2758 Log(("HWR%d-CPU%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
2759 }
2760#endif /* LOG_ENABLED */
2761
2762 /*
2763 * Execute the code.
2764 */
2765 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatHwAccEntry, a);
2766 STAM_PROFILE_START(&pVCpu->em.s.StatHwAccExec, x);
2767 rc = VMMR3HwAccRunGC(pVM, pVCpu);
2768 STAM_PROFILE_STOP(&pVCpu->em.s.StatHwAccExec, x);
2769
2770 /*
2771 * Deal with high priority post execution FFs before doing anything else.
2772 */
2773 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
2774 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
2775 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
2776 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
2777
2778 /*
2779 * Process the returned status code.
2780 */
2781 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
2782 break;
2783
2784 rc = emR3HwaccmHandleRC(pVM, pVCpu, pCtx, rc);
2785 if (rc != VINF_SUCCESS)
2786 break;
2787
2788 /*
2789 * Check and execute forced actions.
2790 */
2791#ifdef VBOX_HIGH_RES_TIMERS_HACK
2792 TMTimerPollVoid(pVM, pVCpu);
2793#endif
2794 if ( VM_FF_ISPENDING(pVM, VM_FF_ALL_MASK)
2795 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_MASK))
2796 {
2797 rc = emR3ForcedActions(pVM, pVCpu, rc);
2798 if ( rc != VINF_SUCCESS
2799 && rc != VINF_EM_RESCHEDULE_HWACC)
2800 {
2801 *pfFFDone = true;
2802 break;
2803 }
2804 }
2805 }
2806
2807 /*
2808 * Return to outer loop.
2809 */
2810#if defined(LOG_ENABLED) && defined(DEBUG)
2811 RTLogFlush(NULL);
2812#endif
2813 return rc;
2814}
2815
2816
2817/**
2818 * Decides whether to execute RAW, HWACC or REM.
2819 *
2820 * @returns new EM state
2821 * @param pVM The VM.
2822 * @param pVCpu The VMCPU handle.
2823 * @param pCtx The CPU context.
2824 */
2825static EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2826{
2827 /*
2828 * When forcing raw-mode execution, things are simple.
2829 */
2830 if (pVCpu->em.s.fForceRAW)
2831 return EMSTATE_RAW;
2832
2833 /*
2834 * We stay in the wait for SIPI state unless explicitly told otherwise.
2835 */
2836 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
2837 return EMSTATE_WAIT_SIPI;
2838
2839 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
2840 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
2841 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
2842
2843 X86EFLAGS EFlags = pCtx->eflags;
2844 if (HWACCMIsEnabled(pVM))
2845 {
2846 /* Hardware accelerated raw-mode:
2847 *
2848 * Typically only 32-bit protected mode code, with paging enabled, is allowed here.
2849 */
2850 if (HWACCMR3CanExecuteGuest(pVM, pCtx) == true)
2851 return EMSTATE_HWACC;
2852
2853 /* Note: Raw mode and hw accelerated mode are incompatible. The latter turns
2854 * off monitoring features essential for raw mode! */
2855 return EMSTATE_REM;
2856 }
2857
2858 /*
2859 * Standard raw-mode:
2860 *
2861 * Here we only support 16 & 32-bit protected mode ring-3 code with no I/O privileges,
2862 * or 32-bit protected mode ring-0 code.
2863 *
2864 * The tests are ordered by the likelihood of being true during normal execution.
2865 */
2866 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
2867 {
2868 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
2869 return EMSTATE_REM;
2870 }
2871
2872#ifndef VBOX_RAW_V86
2873 if (EFlags.u32 & X86_EFL_VM) {
2874 Log2(("raw mode refused: VM_MASK\n"));
2875 return EMSTATE_REM;
2876 }
2877#endif
2878
2879 /** @todo check up on the X86_CR0_AM flag with respect to raw mode!!! We're probably not emulating it right! */
2880 uint32_t u32CR0 = pCtx->cr0;
2881 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
2882 {
2883 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
2884 return EMSTATE_REM;
2885 }
2886
2887 if (pCtx->cr4 & X86_CR4_PAE)
2888 {
2889 uint32_t u32Dummy, u32Features;
2890
2891 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
2892 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
2893 return EMSTATE_REM;
2894 }
2895
2896 unsigned uSS = pCtx->ss;
2897 if ( pCtx->eflags.Bits.u1VM
2898 || (uSS & X86_SEL_RPL) == 3)
2899 {
2900 if (!EMIsRawRing3Enabled(pVM))
2901 return EMSTATE_REM;
2902
2903 if (!(EFlags.u32 & X86_EFL_IF))
2904 {
2905 Log2(("raw mode refused: IF (RawR3)\n"));
2906 return EMSTATE_REM;
2907 }
2908
2909 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
2910 {
2911 Log2(("raw mode refused: CR0.WP + RawR0\n"));
2912 return EMSTATE_REM;
2913 }
2914 }
2915 else
2916 {
2917 if (!EMIsRawRing0Enabled(pVM))
2918 return EMSTATE_REM;
2919
2920 /* Only ring 0 supervisor code. */
2921 if ((uSS & X86_SEL_RPL) != 0)
2922 {
2923 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
2924 return EMSTATE_REM;
2925 }
2926
2927 // Let's start with pure 32-bit ring-0 code first.
2928 /** @todo What's pure 32-bit mode? flat? */
2929 if ( !(pCtx->ssHid.Attr.n.u1DefBig)
2930 || !(pCtx->csHid.Attr.n.u1DefBig))
2931 {
2932 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
2933 return EMSTATE_REM;
2934 }
2935
2936 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
2937 if (!(u32CR0 & X86_CR0_WP))
2938 {
2939 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
2940 return EMSTATE_REM;
2941 }
2942
2943 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
2944 {
2945 Log2(("raw r0 mode forced: patch code\n"));
2946 return EMSTATE_RAW;
2947 }
2948
2949#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
2950 if (!(EFlags.u32 & X86_EFL_IF))
2951 {
2952 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
2953 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
2954 return EMSTATE_REM;
2955 }
2956#endif
2957
2958 /** @todo still necessary??? */
2959 if (EFlags.Bits.u2IOPL != 0)
2960 {
2961 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
2962 return EMSTATE_REM;
2963 }
2964 }
2965
2966 Assert(PGMPhysIsA20Enabled(pVCpu));
2967 return EMSTATE_RAW;
2968}
2969
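/* A minimal usage sketch (illustrative only): this is how the outer loop in
 * EMR3ExecuteVM further down picks the next execution state whenever a
 * reschedule is requested:
 *
 *     pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
 */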
2970
2971/**
2972 * Executes all high-priority post-execution forced actions.
2973 *
2974 * @returns rc or a fatal status code.
2975 *
2976 * @param pVM VM handle.
2977 * @param pVCpu VMCPU handle.
2978 * @param rc The current rc.
2979 */
2980static int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
2981{
2982 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
2983 PDMCritSectFF(pVCpu);
2984
2985 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
2986 CSAMR3DoPendingAction(pVM, pVCpu);
2987
2988 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
2989 {
2990 if ( rc > VINF_EM_NO_MEMORY
2991 && rc <= VINF_EM_LAST)
2992 rc = VINF_EM_NO_MEMORY;
2993 }
2994
2995 return rc;
2996}
2997
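/* Note on the clamp above (assuming, as the UPDATE_RC() macro in
 * emR3ForcedActions below also does, that numerically smaller positive
 * VINF_EM_* codes are the more important ones): when VM_FF_PGM_NO_MEMORY is
 * pending, any less urgent EM status in the range
 * (VINF_EM_NO_MEMORY, VINF_EM_LAST] is downgraded to VINF_EM_NO_MEMORY,
 * while errors and more urgent statuses pass through unchanged. */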
2998
2999/**
3000 * Executes all pending forced actions.
3001 *
3002 * Forced actions can cause execution delays and execution
3003 * rescheduling. The former we deal with using action priority, so
3004 * that, for instance, pending timers aren't scheduled and run until
3005 * right before execution. The rescheduling we deal with using
3006 * return codes; the same goes for VM termination, except in that case
3007 * we exit everything.
3008 *
3009 * @returns VBox status code of equal or greater importance/severity than rc.
3010 * The most important ones are: VINF_EM_RESCHEDULE,
3011 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
3012 *
3013 * @param pVM VM handle.
3014 * @param pVCpu VMCPU handle.
3015 * @param rc The current rc.
3016 *
3017 */
3018static int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
3019{
3020 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
3021#ifdef VBOX_STRICT
3022 int rcIrq = VINF_SUCCESS;
3023#endif
3024 int rc2;
3025#define UPDATE_RC() \
3026 do { \
3027 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
3028 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
3029 break; \
3030 if (!rc || rc2 < rc) \
3031 rc = rc2; \
3032 } while (0)
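
/* A worked example of the UPDATE_RC() merge (illustrative only): an error
 * code already in rc is sticky, a VINF_SUCCESS in rc2 never overrides
 * anything, and otherwise the numerically smaller -- i.e. more important --
 * status wins:
 *
 *     rc = VINF_SUCCESS;
 *     rc2 = VINF_EM_RESCHEDULE; UPDATE_RC();   // rc was 0       -> rc = rc2
 *     rc2 = VINF_SUCCESS;       UPDATE_RC();   // rc2 is success -> no change
 *     rc2 = VINF_EM_SUSPEND;    UPDATE_RC();   // rc = min(rc, rc2)
 */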
3033
3034 /*
3035 * Post execution chunk first.
3036 */
3037 if ( VM_FF_ISPENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
3038 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK))
3039 {
3040 /*
3041 * EMT Rendezvous (must be serviced before termination).
3042 */
3043 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
3044 VMMR3EmtRendezvousFF(pVM, pVCpu);
3045
3046 /*
3047 * Termination request.
3048 */
3049 if (VM_FF_ISPENDING(pVM, VM_FF_TERMINATE))
3050 {
3051 Log2(("emR3ForcedActions: returns VINF_EM_TERMINATE\n"));
3052 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
3053 return VINF_EM_TERMINATE;
3054 }
3055
3056 /*
3057 * Debugger Facility polling.
3058 */
3059 if (VM_FF_ISPENDING(pVM, VM_FF_DBGF))
3060 {
3061 rc2 = DBGFR3VMMForcedAction(pVM);
3062 UPDATE_RC();
3063 }
3064
3065 /*
3066 * Postponed reset request.
3067 */
3068 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_RESET))
3069 {
3070 rc2 = VMR3Reset(pVM);
3071 UPDATE_RC();
3072 }
3073
3074 /*
3075 * CSAM page scanning.
3076 */
3077 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
3078 && VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
3079 {
3080 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
3081
3082 /** @todo check for 16- or 32-bit code! (D bit in the code selector) */
3083 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
3084
3085 CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
3086 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
3087 }
3088
3089 /*
3090 * Out of memory? This comes after CSAM, as CSAM may in theory cause us to run out of memory.
3091 */
3092 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
3093 {
3094 rc2 = PGMR3PhysAllocateHandyPages(pVM);
3095 UPDATE_RC();
3096 if (rc == VINF_EM_NO_MEMORY)
3097 return rc;
3098 }
3099
3100 /* check that we got them all */
3101 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
3102 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_CSAM_SCAN_PAGE);
3103 }
3104
3105 /*
3106 * Normal priority then.
3107 * (Executed in no particular order.)
3108 */
3109 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
3110 {
3111 /*
3112 * PDM Queues are pending.
3113 */
3114 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
3115 PDMR3QueueFlushAll(pVM);
3116
3117 /*
3118 * PDM DMA transfers are pending.
3119 */
3120 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
3121 PDMR3DmaRun(pVM);
3122
3123 /*
3124 * EMT Rendezvous (make sure they are handled before the requests).
3125 */
3126 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
3127 VMMR3EmtRendezvousFF(pVM, pVCpu);
3128
3129 /*
3130 * Requests from other threads.
3131 */
3132 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
3133 {
3134 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY);
3135 Assert(rc2 != VINF_EM_RESET); /* should be per-VCPU */
3136 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE)
3137 {
3138 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
3139 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
3140 return rc2;
3141 }
3142 UPDATE_RC();
3143 }
3144
3145 /* Replay the handler notification changes. */
3146 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
3147 {
3148 /* Try not to cause deadlocks. */
3149 if ( pVM->cCPUs == 1
3150 || ( !PGMIsLockOwner(pVM)
3151 && !IOMIsLockOwner(pVM))
3152 )
3153 {
3154 EMRemLock(pVM);
3155 REMR3ReplayHandlerNotifications(pVM);
3156 EMRemUnlock(pVM);
3157 }
3158 }
3159
3160 /* check that we got them all */
3161 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
3162 }
3163
3164 /*
3165 * Normal priority then. (per-VCPU)
3166 * (Executed in no particular order.)
3167 */
3168 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
3169 && VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
3170 {
3171 /*
3172 * Requests from other threads.
3173 */
3174 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST))
3175 {
3176 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu);
3177 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
3178 {
3179 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
3180 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
3181 return rc2;
3182 }
3183 UPDATE_RC();
3184 }
3185
3186 /* check that we got them all */
3187 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~(VMCPU_FF_REQUEST)));
3188 }
3189
3190 /*
3191 * High priority pre execution chunk last.
3192 * (Executed in ascending priority order.)
3193 */
3194 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
3195 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
3196 {
3197 /*
3198 * Timers before interrupts.
3199 */
3200 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER)
3201 && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
3202 TMR3TimerQueuesDo(pVM);
3203
3204 /*
3205 * The instruction following an emulated STI should *always* be executed!
3206 */
3207 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
3208 && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
3209 {
3210 Log(("VM_FF_EMULATED_STI at %RGv successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
3211 if (CPUMGetGuestEIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
3212 {
3213 /* Note: we intentionally don't clear VMCPU_FF_INHIBIT_INTERRUPTS here if the eip is the same as the inhibited instruction's address.
3214 * Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
3215 * force a world switch again, possibly allowing a guest interrupt to be dispatched in the process, which could
3216 * break the guest. This sounds very unlikely, but such timing-sensitive problems are not as rare as you might think.
3217 */
3218 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3219 }
3220 if (HWACCMR3IsActive(pVCpu))
3221 rc2 = VINF_EM_RESCHEDULE_HWACC;
3222 else
3223 rc2 = PATMAreInterruptsEnabled(pVM) ? VINF_EM_RESCHEDULE_RAW : VINF_EM_RESCHEDULE_REM;
3224
3225 UPDATE_RC();
3226 }
3227
3228 /*
3229 * Interrupts.
3230 */
3231 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
3232 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
3233 && (!rc || rc >= VINF_EM_RESCHEDULE_HWACC)
3234 && !TRPMHasTrap(pVCpu) /* an interrupt could already be scheduled for dispatching in the recompiler. */
3235 && PATMAreInterruptsEnabled(pVM)
3236 && !HWACCMR3IsEventPending(pVCpu))
3237 {
3238 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
3239 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
3240 {
3241 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
3242 /** @todo this really isn't nice, should properly handle this */
3243 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
3244#ifdef VBOX_STRICT
3245 rcIrq = rc2;
3246#endif
3247 UPDATE_RC();
3248 }
3249 /** @todo really ugly; if we entered the hlt state when exiting the recompiler and an interrupt was pending, we previously got stuck in the halted state. */
3250 else if (REMR3QueryPendingInterrupt(pVM, pVCpu) != REM_NO_PENDING_IRQ)
3251 {
3252 rc2 = VINF_EM_RESCHEDULE_REM;
3253 UPDATE_RC();
3254 }
3255 }
3256
3257 /*
3258 * Allocate handy pages.
3259 */
3260 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
3261 {
3262 rc2 = PGMR3PhysAllocateHandyPages(pVM);
3263 UPDATE_RC();
3264 }
3265
3266 /*
3267 * Debugger Facility request.
3268 */
3269 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_DBGF, VM_FF_PGM_NO_MEMORY))
3270 {
3271 rc2 = DBGFR3VMMForcedAction(pVM);
3272 UPDATE_RC();
3273 }
3274
3275 /*
3276 * EMT Rendezvous (must be serviced before termination).
3277 */
3278 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
3279 VMMR3EmtRendezvousFF(pVM, pVCpu);
3280
3281 /*
3282 * Termination request.
3283 */
3284 if (VM_FF_ISPENDING(pVM, VM_FF_TERMINATE))
3285 {
3286 Log2(("emR3ForcedActions: returns VINF_EM_TERMINATE\n"));
3287 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
3288 return VINF_EM_TERMINATE;
3289 }
3290
3291 /*
3292 * Out of memory? Since most of our fellow high-priority actions may cause us
3293 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
3294 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
3295 * than this action, since we can terminate without allocating more memory.
3296 */
3297 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
3298 {
3299 rc2 = PGMR3PhysAllocateHandyPages(pVM);
3300 UPDATE_RC();
3301 if (rc == VINF_EM_NO_MEMORY)
3302 return rc;
3303 }
3304
3305 /*
3306 * If the virtual sync clock is still stopped, make TM restart it.
3307 */
3308 if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
3309 TMR3VirtualSyncFF(pVM, pVCpu);
3310
3311#ifdef DEBUG
3312 /*
3313 * Debug, pause the VM.
3314 */
3315 if (VM_FF_ISPENDING(pVM, VM_FF_DEBUG_SUSPEND))
3316 {
3317 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
3318 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
3319 return VINF_EM_SUSPEND;
3320 }
3321#endif
3322
3323 /* check that we got them all */
3324 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
3325 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS));
3326 }
3327
3328#undef UPDATE_RC
3329 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
3330 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
3331 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
3332 return rc;
3333}
3334
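/* The typical calling pattern (illustrative only), mirroring the check in the
 * hardware-accelerated inner loop above and the outer loop in EMR3ExecuteVM
 * below:
 *
 *     if (   VM_FF_ISPENDING(pVM, VM_FF_ALL_MASK)
 *         || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_MASK))
 *         rc = emR3ForcedActions(pVM, pVCpu, rc);
 */
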
3335/**
3336 * Release the REM lock if owned by the current VCPU.
3337 *
3338 * @param pVM The VM to operate on.
3339 */
3340VMMR3DECL(void) EMR3ReleaseOwnedLocks(PVM pVM)
3341{
3342 while (PDMCritSectIsOwner(&pVM->em.s.CritSectREM))
3343 PDMCritSectLeave(&pVM->em.s.CritSectREM);
3344}
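
/* Note: the while loop above matters because PDM critical sections can be
 * entered recursively; a VCPU that entered CritSectREM several times must
 * leave it the same number of times before it gives up ownership. */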
3345
3346
3347/**
3348 * Execute VM.
3349 *
3350 * This function is the main loop of the VM. The emulation thread
3351 * calls this function when the VM has been successfully constructed
3352 * and we're ready for executing the VM.
3353 *
3354 * Returning from this function means that the VM is turned off or
3355 * suspended (state already saved) and deconstruction in next in line.
3356 *
3357 * All interaction from other thread are done using forced actions
3358 * and signaling of the wait object.
3359 *
3360 * @returns VBox status code, informational status codes may indicate failure.
3361 * @param pVM The VM to operate on.
3362 * @param pVCpu The VMCPU to operate on.
3363 */
3364VMMR3DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
3365{
3366 LogFlow(("EMR3ExecuteVM: pVM=%p enmVMState=%d enmState=%d (%s) fForceRAW=%d\n", pVM, pVM->enmVMState,
3367 pVCpu->em.s.enmState, EMR3GetStateName(pVCpu->em.s.enmState), pVCpu->em.s.fForceRAW));
3368 VM_ASSERT_EMT(pVM);
3369 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
3370 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
3371 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
3372 ("%s\n", EMR3GetStateName(pVCpu->em.s.enmState)));
3373
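/* Set up the fatal-error longjmp target: a zero rc means we arrived here
 * directly from setjmp, while a non-zero rc is a fatal status code delivered
 * via longjmp from deep within one of the execution loops (see the
 * fatal-error path at the bottom of this function). */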
3374 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
3375 if (rc == 0)
3376 {
3377 /*
3378 * Start the virtual time.
3379 */
3380 TMR3NotifyResume(pVM, pVCpu);
3381
3382 /*
3383 * The Outer Main Loop.
3384 */
3385 bool fFFDone = false;
3386
3387 /* Reschedule right away to start in the right state. */
3388 rc = VINF_SUCCESS;
3389
3390 /* If resuming after a pause or a state load, restore the previous
3391 state (halted or wait-for-SIPI) so we don't start executing code; otherwise just reschedule. */
3392 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
3393 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
3394 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
3395 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
3396 else
3397 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
3398
3399 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
3400 for (;;)
3401 {
3402 /*
3403 * Before we can schedule anything (we're here because
3404 * scheduling is required) we must service any pending
3405 * forced actions to avoid any pending action causing
3406 * immediate rescheduling upon entering an inner loop.
3407 *
3408 * Do forced actions.
3409 */
3410 if ( !fFFDone
3411 && rc != VINF_EM_TERMINATE
3412 && rc != VINF_EM_OFF
3413 && ( VM_FF_ISPENDING(pVM, VM_FF_ALL_BUT_RAW_MASK)
3414 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_BUT_RAW_MASK)))
3415 {
3416 rc = emR3ForcedActions(pVM, pVCpu, rc);
3417 if ( ( rc == VINF_EM_RESCHEDULE_REM
3418 || rc == VINF_EM_RESCHEDULE_HWACC)
3419 && pVCpu->em.s.fForceRAW)
3420 rc = VINF_EM_RESCHEDULE_RAW;
3421 }
3422 else if (fFFDone)
3423 fFFDone = false;
3424
3425 /*
3426 * Now what to do?
3427 */
3428 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
3429 switch (rc)
3430 {
3431 /*
3432 * Keep doing what we're currently doing.
3433 */
3434 case VINF_SUCCESS:
3435 break;
3436
3437 /*
3438 * Reschedule - to raw-mode execution.
3439 */
3440 case VINF_EM_RESCHEDULE_RAW:
3441 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", pVCpu->em.s.enmState, EMSTATE_RAW));
3442 pVCpu->em.s.enmState = EMSTATE_RAW;
3443 break;
3444
3445 /*
3446 * Reschedule - to hardware accelerated raw-mode execution.
3447 */
3448 case VINF_EM_RESCHEDULE_HWACC:
3449 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HWACC: %d -> %d (EMSTATE_HWACC)\n", pVCpu->em.s.enmState, EMSTATE_HWACC));
3450 Assert(!pVCpu->em.s.fForceRAW);
3451 pVCpu->em.s.enmState = EMSTATE_HWACC;
3452 break;
3453
3454 /*
3455 * Reschedule - to recompiled execution.
3456 */
3457 case VINF_EM_RESCHEDULE_REM:
3458 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", pVCpu->em.s.enmState, EMSTATE_REM));
3459 pVCpu->em.s.enmState = EMSTATE_REM;
3460 break;
3461
3462#ifdef VBOX_WITH_VMI
3463 /*
3464 * Reschedule - parav call.
3465 */
3466 case VINF_EM_RESCHEDULE_PARAV:
3467 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_PARAV: %d -> %d (EMSTATE_PARAV)\n", pVCpu->em.s.enmState, EMSTATE_PARAV));
3468 pVCpu->em.s.enmState = EMSTATE_PARAV;
3469 break;
3470#endif
3471
3472 /*
3473 * Resume.
3474 */
3475 case VINF_EM_RESUME:
3476 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", pVCpu->em.s.enmState));
3477 /* Don't reschedule in the halted or wait for SIPI case. */
3478 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
3479 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
3480 break;
3481 /* fall through and get scheduled. */
3482
3483 /*
3484 * Reschedule.
3485 */
3486 case VINF_EM_RESCHEDULE:
3487 {
3488 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
3489 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", pVCpu->em.s.enmState, enmState, EMR3GetStateName(enmState)));
3490 pVCpu->em.s.enmState = enmState;
3491 break;
3492 }
3493
3494 /*
3495 * Halted.
3496 */
3497 case VINF_EM_HALT:
3498 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_HALTED));
3499 pVCpu->em.s.enmState = EMSTATE_HALTED;
3500 break;
3501
3502 /*
3503 * Switch to the wait for SIPI state (application processor only).
3504 */
3505 case VINF_EM_WAIT_SIPI:
3506 Assert(pVCpu->idCpu != 0);
3507 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_WAIT_SIPI));
3508 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
3509 break;
3510
3511
3512 /*
3513 * Suspend.
3514 */
3515 case VINF_EM_SUSPEND:
3516 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_SUSPENDED));
3517 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
3518 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
3519 break;
3520
3521 /*
3522 * Reset.
3523 * We might end up doing a double reset for now; we'll have to clean up the mess later.
3524 */
3525 case VINF_EM_RESET:
3526 {
3527 if (pVCpu->idCpu == 0)
3528 {
3529 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
3530 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", pVCpu->em.s.enmState, enmState, EMR3GetStateName(enmState)));
3531 pVCpu->em.s.enmState = enmState;
3532 }
3533 else
3534 {
3535 /* All other VCPUs go into the wait for SIPI state. */
3536 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
3537 }
3538 break;
3539 }
3540
3541 /*
3542 * Power Off.
3543 */
3544 case VINF_EM_OFF:
3545 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", pVCpu->em.s.enmState, EMSTATE_TERMINATING));
3546 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
3547 TMR3NotifySuspend(pVM, pVCpu);
3548 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3549 return rc;
3550
3551 /*
3552 * Terminate the VM.
3553 */
3554 case VINF_EM_TERMINATE:
3555 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", pVCpu->em.s.enmState, EMSTATE_TERMINATING));
3556 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
3557 TMR3NotifySuspend(pVM, pVCpu);
3558 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3559 return rc;
3560
3561
3562 /*
3563 * Out of memory, suspend the VM and stuff.
3564 */
3565 case VINF_EM_NO_MEMORY:
3566 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_SUSPENDED));
3567 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
3568 TMR3NotifySuspend(pVM, pVCpu);
3569 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3570
3571 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
3572 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
3573 if (rc != VINF_EM_SUSPEND)
3574 {
3575 if (RT_SUCCESS_NP(rc))
3576 {
3577 AssertLogRelMsgFailed(("%Rrc\n", rc));
3578 rc = VERR_EM_INTERNAL_ERROR;
3579 }
3580 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
3581 }
3582 return rc;
3583
3584 /*
3585 * Guest debug events.
3586 */
3587 case VINF_EM_DBG_STEPPED:
3588 AssertMsgFailed(("VINF_EM_DBG_STEPPED cannot be here!"));
3589 case VINF_EM_DBG_STOP:
3590 case VINF_EM_DBG_BREAKPOINT:
3591 case VINF_EM_DBG_STEP:
3592 if (pVCpu->em.s.enmState == EMSTATE_RAW)
3593 {
3594 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, pVCpu->em.s.enmState, EMSTATE_DEBUG_GUEST_RAW));
3595 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
3596 }
3597 else
3598 {
3599 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, pVCpu->em.s.enmState, EMSTATE_DEBUG_GUEST_REM));
3600 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
3601 }
3602 break;
3603
3604 /*
3605 * Hypervisor debug events.
3606 */
3607 case VINF_EM_DBG_HYPER_STEPPED:
3608 case VINF_EM_DBG_HYPER_BREAKPOINT:
3609 case VINF_EM_DBG_HYPER_ASSERTION:
3610 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, pVCpu->em.s.enmState, EMSTATE_DEBUG_HYPER));
3611 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
3612 break;
3613
3614 /*
3615 * Guru meditations.
3616 */
3617 case VERR_VMM_RING0_ASSERTION:
3618 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, pVCpu->em.s.enmState, EMSTATE_GURU_MEDITATION));
3619 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
3620 break;
3621
3622 /*
3623 * Any error code showing up here other than the ones we
3624 * know and process above is considered to be FATAL.
3625 *
3626 * Unknown warnings and informational status codes are also
3627 * included in this.
3628 */
3629 default:
3630 if (RT_SUCCESS_NP(rc))
3631 {
3632 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
3633 rc = VERR_EM_INTERNAL_ERROR;
3634 }
3635 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
3636 Log(("EMR3ExecuteVM returns %d\n", rc));
3637 break;
3638 }
3639
3640 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
3641 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
3642
3643 /*
3644 * Act on the state.
3645 */
3646 switch (pVCpu->em.s.enmState)
3647 {
3648 /*
3649 * Execute raw.
3650 */
3651 case EMSTATE_RAW:
3652 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
3653 break;
3654
3655 /*
3656 * Execute hardware accelerated raw.
3657 */
3658 case EMSTATE_HWACC:
3659 rc = emR3HwAccExecute(pVM, pVCpu, &fFFDone);
3660 break;
3661
3662 /*
3663 * Execute recompiled.
3664 */
3665 case EMSTATE_REM:
3666 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
3667 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
3668 break;
3669
3670#ifdef VBOX_WITH_VMI
3671 /*
3672 * Execute PARAV function.
3673 */
3674 case EMSTATE_PARAV:
3675 rc = PARAVCallFunction(pVM);
3676 pVCpu->em.s.enmState = EMSTATE_REM;
3677 break;
3678#endif
3679
3680 /*
3681 * Application processor execution halted until SIPI.
3682 */
3683 case EMSTATE_WAIT_SIPI:
3684 /* no break */
3685 /*
3686 * hlt - execution halted until interrupt.
3687 */
3688 case EMSTATE_HALTED:
3689 {
3690 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
3691 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
3692 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
3693 break;
3694 }
3695
3696 /*
3697 * Suspended - return to VM.cpp.
3698 */
3699 case EMSTATE_SUSPENDED:
3700 TMR3NotifySuspend(pVM, pVCpu);
3701 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3702 return VINF_EM_SUSPEND;
3703
3704 /*
3705 * Debugging in the guest.
3706 */
3707 case EMSTATE_DEBUG_GUEST_REM:
3708 case EMSTATE_DEBUG_GUEST_RAW:
3709 TMR3NotifySuspend(pVM, pVCpu);
3710 rc = emR3Debug(pVM, pVCpu, rc);
3711 TMR3NotifyResume(pVM, pVCpu);
3712 Log2(("EMR3ExecuteVM: enmr3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
3713 break;
3714
3715 /*
3716 * Debugging in the hypervisor.
3717 */
3718 case EMSTATE_DEBUG_HYPER:
3719 {
3720 TMR3NotifySuspend(pVM, pVCpu);
3721 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3722
3723 rc = emR3Debug(pVM, pVCpu, rc);
3724 Log2(("EMR3ExecuteVM: enmr3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
3725 if (rc != VINF_SUCCESS)
3726 {
3727 /* switch to guru meditation mode */
3728 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
3729 VMMR3FatalDump(pVM, pVCpu, rc);
3730 return rc;
3731 }
3732
3733 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
3734 TMR3NotifyResume(pVM, pVCpu);
3735 break;
3736 }
3737
3738 /*
3739 * Guru meditation takes place in the debugger.
3740 */
3741 case EMSTATE_GURU_MEDITATION:
3742 {
3743 TMR3NotifySuspend(pVM, pVCpu);
3744 VMMR3FatalDump(pVM, pVCpu, rc);
3745 emR3Debug(pVM, pVCpu, rc);
3746 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3747 return rc;
3748 }
3749
3750 /*
3751 * The states we don't expect here.
3752 */
3753 case EMSTATE_NONE:
3754 case EMSTATE_TERMINATING:
3755 default:
3756 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
3757 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
3758 TMR3NotifySuspend(pVM, pVCpu);
3759 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3760 return VERR_EM_INTERNAL_ERROR;
3761 }
3762 } /* The Outer Main Loop */
3763 }
3764 else
3765 {
3766 /*
3767 * Fatal error.
3768 */
3769 LogFlow(("EMR3ExecuteVM: returns %Rrc (longjmp / fatal error)\n", rc));
3770 TMR3NotifySuspend(pVM, pVCpu);
3771 VMMR3FatalDump(pVM, pVCpu, rc);
3772 emR3Debug(pVM, pVCpu, rc);
3773 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3774 /** @todo change the VM state! */
3775 return rc;
3776 }
3777
3778 /* (won't ever get here). */
3779 AssertFailed();
3780}
3781