VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 36125

Last change on this file since 36125 was 36066, checked in by vboxsync, 14 years ago

step+logging: interrupt fix.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 178.7 KB
 
1/* $Id: VBoxRecompiler.c 36066 2011-02-23 17:42:02Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_REM
23#include "vl.h"
24#include "osdep.h"
25#include "exec-all.h"
26#include "config.h"
27#include "cpu-all.h"
28
29#include <VBox/vmm/rem.h>
30#include <VBox/vmm/vmapi.h>
31#include <VBox/vmm/tm.h>
32#include <VBox/vmm/ssm.h>
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/trpm.h>
35#include <VBox/vmm/iom.h>
36#include <VBox/vmm/mm.h>
37#include <VBox/vmm/pgm.h>
38#include <VBox/vmm/pdm.h>
39#include <VBox/vmm/dbgf.h>
40#include <VBox/dbg.h>
41#include <VBox/vmm/hwaccm.h>
42#include <VBox/vmm/patm.h>
43#include <VBox/vmm/csam.h>
44#include "REMInternal.h"
45#include <VBox/vmm/vm.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49#include <VBox/log.h>
50#include <iprt/semaphore.h>
51#include <iprt/asm.h>
52#include <iprt/assert.h>
53#include <iprt/thread.h>
54#include <iprt/string.h>
55
56/* Don't wanna include everything. */
57extern void cpu_exec_init_all(unsigned long tb_size);
58extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
59extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
60extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
61extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
62extern void tlb_flush(CPUState *env, int flush_global);
63extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
64extern void sync_ldtr(CPUX86State *env1, int selector);
65
66#ifdef VBOX_STRICT
67unsigned long get_phys_page_offset(target_ulong addr);
68#endif
69
70
71/*******************************************************************************
72* Defined Constants And Macros *
73*******************************************************************************/
74
75/** Copy 80-bit fpu register at pSrc to pDst.
76 * This is probably faster than *calling* memcpy.
77 */
78#define REM_COPY_FPU_REG(pDst, pSrc) \
79 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
80
81/** How remR3RunLoggingStep operates. */
82#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
83
84
85/*******************************************************************************
86* Internal Functions *
87*******************************************************************************/
88static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
89static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
90static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
91static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
92
93static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
94static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
96static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
97static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99
100static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
101static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
103static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
104static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106
107static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
108static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
109static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
110
111/*******************************************************************************
112* Global Variables *
113*******************************************************************************/
114
115/** @todo Move stats to REM::s some rainy day we have nothing do to. */
116#ifdef VBOX_WITH_STATISTICS
117static STAMPROFILEADV gStatExecuteSingleInstr;
118static STAMPROFILEADV gStatCompilationQEmu;
119static STAMPROFILEADV gStatRunCodeQEmu;
120static STAMPROFILEADV gStatTotalTimeQEmu;
121static STAMPROFILEADV gStatTimers;
122static STAMPROFILEADV gStatTBLookup;
123static STAMPROFILEADV gStatIRQ;
124static STAMPROFILEADV gStatRawCheck;
125static STAMPROFILEADV gStatMemRead;
126static STAMPROFILEADV gStatMemWrite;
127static STAMPROFILE gStatGCPhys2HCVirt;
128static STAMPROFILE gStatHCVirt2GCPhys;
129static STAMCOUNTER gStatCpuGetTSC;
130static STAMCOUNTER gStatRefuseTFInhibit;
131static STAMCOUNTER gStatRefuseVM86;
132static STAMCOUNTER gStatRefusePaging;
133static STAMCOUNTER gStatRefusePAE;
134static STAMCOUNTER gStatRefuseIOPLNot0;
135static STAMCOUNTER gStatRefuseIF0;
136static STAMCOUNTER gStatRefuseCode16;
137static STAMCOUNTER gStatRefuseWP0;
138static STAMCOUNTER gStatRefuseRing1or2;
139static STAMCOUNTER gStatRefuseCanExecute;
140static STAMCOUNTER gStatREMGDTChange;
141static STAMCOUNTER gStatREMIDTChange;
142static STAMCOUNTER gStatREMLDTRChange;
143static STAMCOUNTER gStatREMTRChange;
144static STAMCOUNTER gStatSelOutOfSync[6];
145static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
146static STAMCOUNTER gStatFlushTBs;
147#endif
148/* in exec.c */
149extern uint32_t tlb_flush_count;
150extern uint32_t tb_flush_count;
151extern uint32_t tb_phys_invalidate_count;
152
153/*
154 * Global stuff.
155 */
156
/** MMIO read callbacks for 8-, 16- and 32-bit accesses (registered with
 *  cpu_register_io_memory() in REMR3Init). */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks for 8-, 16- and 32-bit accesses. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Access-handler read callbacks for 8-, 16- and 32-bit accesses
 *  (used for pages covered by PGM physical handlers). */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Access-handler write callbacks for 8-, 16- and 32-bit accesses. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
188
189
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
/** Handler for the '.remstep' debugger command (defined further down). */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);

/** '.remstep' arguments — a single optional boolean/mnemonic. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors registered with DBGCRegisterCommands() in REMR3Init. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd = "remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
220
221/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
222uint8_t *code_gen_prologue;
223
224
225/*******************************************************************************
226* Internal Functions *
227*******************************************************************************/
228void remAbort(int rc, const char *pszTip);
229extern int testmath(void);
230
231/* Put them here to avoid unused variable warning. */
232AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
233#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
234//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
235/* Why did this have to be identical?? */
236AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
237#else
238AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
239#endif
240
241
/**
 * Initializes the REM.
 *
 * Sets up the recompiler CPU state (qemu), allocates the prologue and
 * single-instruction code buffers, registers the MMIO/handler memory types,
 * the saved-state unit, the debugger command and (optionally) statistics.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    PREMHANDLERNOTIFICATION pCur;
    uint32_t u32Dummy;
    int rc;
    unsigned i;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     *       is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     *       deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL; /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* Ignore all notifications while we set things up below. */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /* Executable prologue buffer for generated code (see code_gen_prologue above). */
    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    PVMCPU pVCpu = VMMGetCpu(pVM);
    /* Mirror the guest CPUID feature bits into the recompiler CPU state. */
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    /* Allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* Finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default. */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* Stop ignoring notifications. */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, NULL, NULL,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands.  Registered once per process, not per VM.
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        /* Inner rc intentionally shadows the outer one: a failure here is non-fatal. */
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     *
     * NOTE(review): the TBLookup/IRQ/RawCheck descriptions below look copy-pasted
     * from the Timers entry ("Profiling timer scheduling.") — confirm intended.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");

    STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");

    /* NOTE(review): "TFInibit" below looks like a typo for "TFInhibit", but the
       path is an external stat name — do not change without checking consumers. */
    STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */

    STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
# ifdef DEBUG_TMP_LOGGING
    logfile = fopen("/tmp/vbox-qemu.log", "w");
# endif
#endif

    /*
     * Init the handler notification lists: all records start on the free list.
     */
    pVM->rem.s.idxPendingList = UINT32_MAX;
    pVM->rem.s.idxFreeList = 0;

    for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }
    /* The loop leaves pCur on the final record; terminate the free list there. */
    pCur->idxNext = UINT32_MAX; /* the last record. */

    /* rc is the (successful) SSMR3RegisterInternal status at this point. */
    return rc;
}
439
440
441/**
442 * Finalizes the REM initialization.
443 *
444 * This is called after all components, devices and drivers has
445 * been initialized. Its main purpose it to finish the RAM related
446 * initialization.
447 *
448 * @returns VBox status code.
449 *
450 * @param pVM The VM handle.
451 */
452REMR3DECL(int) REMR3InitFinalize(PVM pVM)
453{
454 int rc;
455
456 /*
457 * Ram size & dirty bit map.
458 */
459 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
460 pVM->rem.s.fGCPhysLastRamFixed = true;
461#ifdef RT_STRICT
462 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
463#else
464 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
465#endif
466 return rc;
467}
468
469
/**
 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to place an inaccessible guard area after the
 *                      dirty map (strict builds) to catch overruns.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* The RAM size is derived from the highest RAM address; guard against wrap. */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);
    phys_ram_size = cb;
    /* One dirty-map byte per page; assert the size was page aligned. */
    phys_ram_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        /* Simple heap allocation, freed with the VM heap. */
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         * (_4G >> PAGE_SHIFT bytes of map cover 4GB of RAM.)
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Make everything beyond the page-aligned map inaccessible (the guard). */
        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(phys_ram_dirty, cbBitmapFull);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the pointer so the map's end abuts the protected guard region. */
        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
    }

    /* Initialize it: all bits set, i.e. everything starts out dirty. */
    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    return rc;
}
524
525
526/**
527 * Terminates the REM.
528 *
529 * Termination means cleaning up and freeing all resources,
530 * the VM it self is at this point powered off or suspended.
531 *
532 * @returns VBox status code.
533 * @param pVM The VM to operate on.
534 */
535REMR3DECL(int) REMR3Term(PVM pVM)
536{
537#ifdef VBOX_WITH_STATISTICS
538 /*
539 * Statistics.
540 */
541 STAM_DEREG(pVM, &gStatExecuteSingleInstr);
542 STAM_DEREG(pVM, &gStatCompilationQEmu);
543 STAM_DEREG(pVM, &gStatRunCodeQEmu);
544 STAM_DEREG(pVM, &gStatTotalTimeQEmu);
545 STAM_DEREG(pVM, &gStatTimers);
546 STAM_DEREG(pVM, &gStatTBLookup);
547 STAM_DEREG(pVM, &gStatIRQ);
548 STAM_DEREG(pVM, &gStatRawCheck);
549 STAM_DEREG(pVM, &gStatMemRead);
550 STAM_DEREG(pVM, &gStatMemWrite);
551 STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
552 STAM_DEREG(pVM, &gStatGCPhys2HCVirt);
553
554 STAM_DEREG(pVM, &gStatCpuGetTSC);
555
556 STAM_DEREG(pVM, &gStatRefuseTFInhibit);
557 STAM_DEREG(pVM, &gStatRefuseVM86);
558 STAM_DEREG(pVM, &gStatRefusePaging);
559 STAM_DEREG(pVM, &gStatRefusePAE);
560 STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
561 STAM_DEREG(pVM, &gStatRefuseIF0);
562 STAM_DEREG(pVM, &gStatRefuseCode16);
563 STAM_DEREG(pVM, &gStatRefuseWP0);
564 STAM_DEREG(pVM, &gStatRefuseRing1or2);
565 STAM_DEREG(pVM, &gStatRefuseCanExecute);
566 STAM_DEREG(pVM, &gStatFlushTBs);
567
568 STAM_DEREG(pVM, &gStatREMGDTChange);
569 STAM_DEREG(pVM, &gStatREMLDTRChange);
570 STAM_DEREG(pVM, &gStatREMIDTChange);
571 STAM_DEREG(pVM, &gStatREMTRChange);
572
573 STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
574 STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
575 STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
576 STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
577 STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
578 STAM_DEREG(pVM, &gStatSelOutOfSync[5]);
579
580 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
581 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
582 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
583 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
584 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
585 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);
586
587 STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
588#endif /* VBOX_WITH_STATISTICS */
589
590 STAM_REL_DEREG(pVM, &tb_flush_count);
591 STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
592 STAM_REL_DEREG(pVM, &tlb_flush_count);
593
594 return VINF_SUCCESS;
595}
596
597
598/**
599 * The VM is being reset.
600 *
601 * For the REM component this means to call the cpu_reset() and
602 * reinitialize some state variables.
603 *
604 * @param pVM VM handle.
605 */
606REMR3DECL(void) REMR3Reset(PVM pVM)
607{
608 /*
609 * Reset the REM cpu.
610 */
611 Assert(pVM->rem.s.cIgnoreAll == 0);
612 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
613 cpu_reset(&pVM->rem.s.Env);
614 pVM->rem.s.cInvalidatedPages = 0;
615 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
616 Assert(pVM->rem.s.cIgnoreAll == 0);
617
618 /* Clear raw ring 0 init state */
619 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
620
621 /* Flush the TBs the next time we execute code here. */
622 pVM->rem.s.fFlushTBs = true;
623}
624
625
626/**
627 * Execute state save operation.
628 *
629 * @returns VBox status code.
630 * @param pVM VM Handle.
631 * @param pSSM SSM operation handle.
632 */
633static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
634{
635 PREM pRem = &pVM->rem.s;
636
637 /*
638 * Save the required CPU Env bits.
639 * (Not much because we're never in REM when doing the save.)
640 */
641 LogFlow(("remR3Save:\n"));
642 Assert(!pRem->fInREM);
643 SSMR3PutU32(pSSM, pRem->Env.hflags);
644 SSMR3PutU32(pSSM, ~0); /* separator */
645
646 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
647 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
648 SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);
649
650 return SSMR3PutU32(pSSM, ~0); /* terminator */
651}
652
653
/**
 * Execute state load operation.
 *
 * Mirrors remR3Save; also accepts the old 1.6 layout, skipping the
 * redundant CPU state and invalidated-page list it contained.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    uint32_t i;
    int rc;
    PREM pRem;

    LogFlow(("remR3Load:\n"));
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Validate version.
     */
    if ( uVersion != REM_SAVED_STATE_VERSION
        && uVersion != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         */
        /** @todo r=bird: We should just drop all these items, restoring doesn't make
         * sense. */
        rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    /* NOTE(review): the loop below declares its own pVCpu, shadowing this one —
       harmless but confusing; confirm before cleaning up. */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Sync the Load Flush the TLB
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignorable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    for (i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}
784
785
786
787#undef LOG_GROUP
788#define LOG_GROUP LOG_GROUP_REM_RUN
789
790/**
791 * Single steps an instruction in recompiled mode.
792 *
793 * Before calling this function the REM state needs to be in sync with
794 * the VM. Call REMR3State() to perform the sync. It's only necessary
795 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
796 * and after calling REMR3StateBack().
797 *
798 * @returns VBox status code.
799 *
800 * @param pVM VM Handle.
801 * @param pVCpu VMCPU Handle.
802 */
803REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
804{
805 int rc, interrupt_request;
806 RTGCPTR GCPtrPC;
807 bool fBp;
808
809 /*
810 * Lock the REM - we don't wanna have anyone interrupting us
811 * while stepping - and enabled single stepping. We also ignore
812 * pending interrupts and suchlike.
813 */
814 interrupt_request = pVM->rem.s.Env.interrupt_request;
815 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
816 pVM->rem.s.Env.interrupt_request = 0;
817 cpu_single_step(&pVM->rem.s.Env, 1);
818
819 /*
820 * If we're standing at a breakpoint, that have to be disabled before we start stepping.
821 */
822 GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
823 fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);
824
825 /*
826 * Execute and handle the return code.
827 * We execute without enabling the cpu tick, so on success we'll
828 * just flip it on and off to make sure it moves
829 */
830 rc = cpu_exec(&pVM->rem.s.Env);
831 if (rc == EXCP_DEBUG)
832 {
833 TMR3NotifyResume(pVM, pVCpu);
834 TMR3NotifySuspend(pVM, pVCpu);
835 rc = VINF_EM_DBG_STEPPED;
836 }
837 else
838 {
839 switch (rc)
840 {
841 case EXCP_INTERRUPT: rc = VINF_SUCCESS; break;
842 case EXCP_HLT:
843 case EXCP_HALTED: rc = VINF_EM_HALT; break;
844 case EXCP_RC:
845 rc = pVM->rem.s.rc;
846 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
847 break;
848 case EXCP_EXECUTE_RAW:
849 case EXCP_EXECUTE_HWACC:
850 /** @todo: is it correct? No! */
851 rc = VINF_SUCCESS;
852 break;
853 default:
854 AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
855 rc = VERR_INTERNAL_ERROR;
856 break;
857 }
858 }
859
860 /*
861 * Restore the stuff we changed to prevent interruption.
862 * Unlock the REM.
863 */
864 if (fBp)
865 {
866 int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
867 Assert(rc2 == 0); NOREF(rc2);
868 }
869 cpu_single_step(&pVM->rem.s.Env, 0);
870 pVM->rem.s.Env.interrupt_request = interrupt_request;
871
872 return rc;
873}
874
875
876/**
877 * Set a breakpoint using the REM facilities.
878 *
879 * @returns VBox status code.
880 * @param pVM The VM handle.
881 * @param Address The breakpoint address.
882 * @thread The emulation thread.
883 */
884REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
885{
886 VM_ASSERT_EMT(pVM);
887 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
888 {
889 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
890 return VINF_SUCCESS;
891 }
892 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
893 return VERR_REM_NO_MORE_BP_SLOTS;
894}
895
896
897/**
898 * Clears a breakpoint set by REMR3BreakpointSet().
899 *
900 * @returns VBox status code.
901 * @param pVM The VM handle.
902 * @param Address The breakpoint address.
903 * @thread The emulation thread.
904 */
905REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
906{
907 VM_ASSERT_EMT(pVM);
908 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
909 {
910 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
911 return VINF_SUCCESS;
912 }
913 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
914 return VERR_REM_BP_NOT_FOUND;
915}
916
917
918/**
919 * Emulate an instruction.
920 *
921 * This function executes one instruction without letting anyone
922 * interrupt it. This is intended for being called while being in
923 * raw mode and thus will take care of all the state syncing between
924 * REM and the rest.
925 *
926 * @returns VBox status code.
927 * @param pVM VM handle.
928 * @param pVCpu VMCPU Handle.
929 */
930REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
931{
932 bool fFlushTBs;
933
934 int rc, rc2;
935 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
936
937 /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
938 * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
939 */
940 if (HWACCMIsEnabled(pVM))
941 pVM->rem.s.Env.state |= CPU_RAW_HWACC;
942
943 /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
944 fFlushTBs = pVM->rem.s.fFlushTBs;
945 pVM->rem.s.fFlushTBs = false;
946
947 /*
948 * Sync the state and enable single instruction / single stepping.
949 */
950 rc = REMR3State(pVM, pVCpu);
951 pVM->rem.s.fFlushTBs = fFlushTBs;
952 if (RT_SUCCESS(rc))
953 {
954 int interrupt_request = pVM->rem.s.Env.interrupt_request;
955 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
956#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
957 cpu_single_step(&pVM->rem.s.Env, 0);
958#endif
959 Assert(!pVM->rem.s.Env.singlestep_enabled);
960
961 /*
962 * Now we set the execute single instruction flag and enter the cpu_exec loop.
963 */
964 TMNotifyStartOfExecution(pVCpu);
965 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
966 rc = cpu_exec(&pVM->rem.s.Env);
967 TMNotifyEndOfExecution(pVCpu);
968 switch (rc)
969 {
970 /*
971 * Executed without anything out of the way happening.
972 */
973 case EXCP_SINGLE_INSTR:
974 rc = VINF_EM_RESCHEDULE;
975 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
976 break;
977
978 /*
979 * If we take a trap or start servicing a pending interrupt, we might end up here.
980 * (Timer thread or some other thread wishing EMT's attention.)
981 */
982 case EXCP_INTERRUPT:
983 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
984 rc = VINF_EM_RESCHEDULE;
985 break;
986
987 /*
988 * Single step, we assume!
989 * If there was a breakpoint there we're fucked now.
990 */
991 case EXCP_DEBUG:
992 {
993 /* breakpoint or single step? */
994 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
995 int iBP;
996 rc = VINF_EM_DBG_STEPPED;
997 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
998 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
999 {
1000 rc = VINF_EM_DBG_BREAKPOINT;
1001 break;
1002 }
1003 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
1004 break;
1005 }
1006
1007 /*
1008 * hlt instruction.
1009 */
1010 case EXCP_HLT:
1011 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
1012 rc = VINF_EM_HALT;
1013 break;
1014
1015 /*
1016 * The VM has halted.
1017 */
1018 case EXCP_HALTED:
1019 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
1020 rc = VINF_EM_HALT;
1021 break;
1022
1023 /*
1024 * Switch to RAW-mode.
1025 */
1026 case EXCP_EXECUTE_RAW:
1027 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1028 rc = VINF_EM_RESCHEDULE_RAW;
1029 break;
1030
1031 /*
1032 * Switch to hardware accelerated RAW-mode.
1033 */
1034 case EXCP_EXECUTE_HWACC:
1035 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1036 rc = VINF_EM_RESCHEDULE_HWACC;
1037 break;
1038
1039 /*
1040 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1041 */
1042 case EXCP_RC:
1043 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
1044 rc = pVM->rem.s.rc;
1045 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1046 break;
1047
1048 /*
1049 * Figure out the rest when they arrive....
1050 */
1051 default:
1052 AssertMsgFailed(("rc=%d\n", rc));
1053 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
1054 rc = VINF_EM_RESCHEDULE;
1055 break;
1056 }
1057
1058 /*
1059 * Switch back the state.
1060 */
1061 pVM->rem.s.Env.interrupt_request = interrupt_request;
1062 rc2 = REMR3StateBack(pVM, pVCpu);
1063 AssertRC(rc2);
1064 }
1065
1066 Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
1067 rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1068 return rc;
1069}
1070
1071
1072/**
1073 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
1074 *
1075 * @returns VBox status code.
1076 *
1077 * @param pVM The VM handle.
1078 * @param pVCpu The Virtual CPU handle.
1079 */
1080static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
1081{
1082 int rc;
1083
1084 Assert(pVM->rem.s.fInREM);
1085#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1086 cpu_single_step(&pVM->rem.s.Env, 1);
1087#else
1088 Assert(!pVM->rem.s.Env.singlestep_enabled);
1089#endif
1090
1091 /*
1092 * Now we set the execute single instruction flag and enter the cpu_exec loop.
1093 */
1094 for (;;)
1095 {
1096 char szBuf[256];
1097
1098 /*
1099 * Log the current registers state and instruction.
1100 */
1101 remR3StateUpdate(pVM, pVCpu);
1102 DBGFR3Info(pVM, "cpumguest", NULL, NULL);
1103 szBuf[0] = '\0';
1104 rc = DBGFR3DisasInstrEx(pVM,
1105 pVCpu->idCpu,
1106 0, /* Sel */
1107 0, /* GCPtr */
1108 DBGF_DISAS_FLAGS_CURRENT_GUEST
1109 | DBGF_DISAS_FLAGS_DEFAULT_MODE
1110 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
1111 szBuf,
1112 sizeof(szBuf),
1113 NULL);
1114 if (RT_FAILURE(rc))
1115 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
1116 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
1117
1118 /*
1119 * Execute the instruction.
1120 */
1121 TMNotifyStartOfExecution(pVCpu);
1122
1123 if ( pVM->rem.s.Env.exception_index < 0
1124 || pVM->rem.s.Env.exception_index > 256)
1125 pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */
1126
1127#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1128 pVM->rem.s.Env.interrupt_request = 0;
1129#else
1130 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
1131#endif
1132 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1133 || pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
1134 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
1135 RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1136 pVM->rem.s.Env.interrupt_request,
1137 pVM->rem.s.Env.halted,
1138 pVM->rem.s.Env.exception_index
1139 );
1140
1141 rc = cpu_exec(&pVM->rem.s.Env);
1142
1143 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1144 pVM->rem.s.Env.interrupt_request,
1145 pVM->rem.s.Env.halted,
1146 pVM->rem.s.Env.exception_index
1147 );
1148
1149 TMNotifyEndOfExecution(pVCpu);
1150
1151 switch (rc)
1152 {
1153#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1154 /*
1155 * The normal exit.
1156 */
1157 case EXCP_SINGLE_INSTR:
1158 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1159 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1160 continue;
1161 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1162 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1163 rc = VINF_SUCCESS;
1164 break;
1165
1166#else
1167 /*
1168 * The normal exit, check for breakpoints at PC just to be sure.
1169 */
1170#endif
1171 case EXCP_DEBUG:
1172 rc = VINF_EM_DBG_STEPPED;
1173 if (pVM->rem.s.Env.nb_breakpoints > 0)
1174 {
1175 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1176 int iBP;
1177 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
1178 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
1179 {
1180 rc = VINF_EM_DBG_BREAKPOINT;
1181 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC);
1182 break;
1183 }
1184 }
1185#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1186 if (rc == VINF_EM_DBG_STEPPED)
1187 {
1188 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1189 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1190 continue;
1191
1192 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1193 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1194 rc = VINF_SUCCESS;
1195 }
1196#endif
1197 break;
1198
1199 /*
1200 * If we take a trap or start servicing a pending interrupt, we might end up here.
1201 * (Timer thread or some other thread wishing EMT's attention.)
1202 */
1203 case EXCP_INTERRUPT:
1204 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
1205 rc = VINF_SUCCESS;
1206 break;
1207
1208 /*
1209 * hlt instruction.
1210 */
1211 case EXCP_HLT:
1212 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
1213 rc = VINF_EM_HALT;
1214 break;
1215
1216 /*
1217 * The VM has halted.
1218 */
1219 case EXCP_HALTED:
1220 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
1221 rc = VINF_EM_HALT;
1222 break;
1223
1224 /*
1225 * Switch to RAW-mode.
1226 */
1227 case EXCP_EXECUTE_RAW:
1228 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
1229 rc = VINF_EM_RESCHEDULE_RAW;
1230 break;
1231
1232 /*
1233 * Switch to hardware accelerated RAW-mode.
1234 */
1235 case EXCP_EXECUTE_HWACC:
1236 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HWACC rc=VINF_EM_RESCHEDULE_HWACC\n");
1237 rc = VINF_EM_RESCHEDULE_HWACC;
1238 break;
1239
1240 /*
1241 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1242 */
1243 case EXCP_RC:
1244 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
1245 rc = pVM->rem.s.rc;
1246 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1247 break;
1248
1249 /*
1250 * Figure out the rest when they arrive....
1251 */
1252 default:
1253 AssertMsgFailed(("rc=%d\n", rc));
1254 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
1255 rc = VINF_EM_RESCHEDULE;
1256 break;
1257 }
1258 break;
1259 }
1260
1261#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1262// cpu_single_step(&pVM->rem.s.Env, 0);
1263#else
1264 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
1265#endif
1266 return rc;
1267}
1268
1269
1270/**
1271 * Runs code in recompiled mode.
1272 *
1273 * Before calling this function the REM state needs to be in sync with
1274 * the VM. Call REMR3State() to perform the sync. It's only necessary
1275 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1276 * and after calling REMR3StateBack().
1277 *
1278 * @returns VBox status code.
1279 *
1280 * @param pVM VM Handle.
1281 * @param pVCpu VMCPU Handle.
1282 */
1283REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
1284{
1285 int rc;
1286
1287 if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
1288 return remR3RunLoggingStep(pVM, pVCpu);
1289
1290 Assert(pVM->rem.s.fInREM);
1291 Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1292
1293 TMNotifyStartOfExecution(pVCpu);
1294 rc = cpu_exec(&pVM->rem.s.Env);
1295 TMNotifyEndOfExecution(pVCpu);
1296 switch (rc)
1297 {
1298 /*
1299 * This happens when the execution was interrupted
1300 * by an external event, like pending timers.
1301 */
1302 case EXCP_INTERRUPT:
1303 Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
1304 rc = VINF_SUCCESS;
1305 break;
1306
1307 /*
1308 * hlt instruction.
1309 */
1310 case EXCP_HLT:
1311 Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
1312 rc = VINF_EM_HALT;
1313 break;
1314
1315 /*
1316 * The VM has halted.
1317 */
1318 case EXCP_HALTED:
1319 Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
1320 rc = VINF_EM_HALT;
1321 break;
1322
1323 /*
1324 * Breakpoint/single step.
1325 */
1326 case EXCP_DEBUG:
1327 {
1328#if 0//def DEBUG_bird
1329 static int iBP = 0;
1330 printf("howdy, breakpoint! iBP=%d\n", iBP);
1331 switch (iBP)
1332 {
1333 case 0:
1334 cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
1335 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
1336 //pVM->rem.s.Env.interrupt_request = 0;
1337 //pVM->rem.s.Env.exception_index = -1;
1338 //g_fInterruptDisabled = 1;
1339 rc = VINF_SUCCESS;
1340 asm("int3");
1341 break;
1342 default:
1343 asm("int3");
1344 break;
1345 }
1346 iBP++;
1347#else
1348 /* breakpoint or single step? */
1349 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1350 int iBP;
1351 rc = VINF_EM_DBG_STEPPED;
1352 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
1353 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
1354 {
1355 rc = VINF_EM_DBG_BREAKPOINT;
1356 break;
1357 }
1358 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
1359#endif
1360 break;
1361 }
1362
1363 /*
1364 * Switch to RAW-mode.
1365 */
1366 case EXCP_EXECUTE_RAW:
1367 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1368 rc = VINF_EM_RESCHEDULE_RAW;
1369 break;
1370
1371 /*
1372 * Switch to hardware accelerated RAW-mode.
1373 */
1374 case EXCP_EXECUTE_HWACC:
1375 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1376 rc = VINF_EM_RESCHEDULE_HWACC;
1377 break;
1378
1379 /*
1380 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1381 */
1382 case EXCP_RC:
1383 Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
1384 rc = pVM->rem.s.rc;
1385 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1386 break;
1387
1388 /*
1389 * Figure out the rest when they arrive....
1390 */
1391 default:
1392 AssertMsgFailed(("rc=%d\n", rc));
1393 Log2(("REMR3Run: cpu_exec -> %d\n", rc));
1394 rc = VINF_SUCCESS;
1395 break;
1396 }
1397
1398 Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1399 return rc;
1400}
1401
1402
1403/**
1404 * Check if the cpu state is suitable for Raw execution.
1405 *
1406 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
1407 *
1408 * @param env The CPU env struct.
1409 * @param eip The EIP to check this for (might differ from env->eip).
1410 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1411 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1412 *
1413 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1414 */
1415bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
1416{
1417 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1418 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1419 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1420 uint32_t u32CR0;
1421
1422 /* Update counter. */
1423 env->pVM->rem.s.cCanExecuteRaw++;
1424
1425 /* Never when single stepping+logging guest code. */
1426 if (env->state & CPU_EMULATE_SINGLE_STEP)
1427 return false;
1428
1429 if (HWACCMIsEnabled(env->pVM))
1430 {
1431 CPUMCTX Ctx;
1432
1433 env->state |= CPU_RAW_HWACC;
1434
1435 /*
1436 * Create partial context for HWACCMR3CanExecuteGuest
1437 */
1438 Ctx.cr0 = env->cr[0];
1439 Ctx.cr3 = env->cr[3];
1440 Ctx.cr4 = env->cr[4];
1441
1442 Ctx.tr = env->tr.selector;
1443 Ctx.trHid.u64Base = env->tr.base;
1444 Ctx.trHid.u32Limit = env->tr.limit;
1445 Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;
1446
1447 Ctx.ldtr = env->ldt.selector;
1448 Ctx.ldtrHid.u64Base = env->ldt.base;
1449 Ctx.ldtrHid.u32Limit = env->ldt.limit;
1450 Ctx.ldtrHid.Attr.u = (env->ldt.flags >> 8) & 0xF0FF;
1451
1452 Ctx.idtr.cbIdt = env->idt.limit;
1453 Ctx.idtr.pIdt = env->idt.base;
1454
1455 Ctx.gdtr.cbGdt = env->gdt.limit;
1456 Ctx.gdtr.pGdt = env->gdt.base;
1457
1458 Ctx.rsp = env->regs[R_ESP];
1459 Ctx.rip = env->eip;
1460
1461 Ctx.eflags.u32 = env->eflags;
1462
1463 Ctx.cs = env->segs[R_CS].selector;
1464 Ctx.csHid.u64Base = env->segs[R_CS].base;
1465 Ctx.csHid.u32Limit = env->segs[R_CS].limit;
1466 Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;
1467
1468 Ctx.ds = env->segs[R_DS].selector;
1469 Ctx.dsHid.u64Base = env->segs[R_DS].base;
1470 Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
1471 Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;
1472
1473 Ctx.es = env->segs[R_ES].selector;
1474 Ctx.esHid.u64Base = env->segs[R_ES].base;
1475 Ctx.esHid.u32Limit = env->segs[R_ES].limit;
1476 Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;
1477
1478 Ctx.fs = env->segs[R_FS].selector;
1479 Ctx.fsHid.u64Base = env->segs[R_FS].base;
1480 Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
1481 Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;
1482
1483 Ctx.gs = env->segs[R_GS].selector;
1484 Ctx.gsHid.u64Base = env->segs[R_GS].base;
1485 Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
1486 Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;
1487
1488 Ctx.ss = env->segs[R_SS].selector;
1489 Ctx.ssHid.u64Base = env->segs[R_SS].base;
1490 Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
1491 Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;
1492
1493 Ctx.msrEFER = env->efer;
1494
1495 /* Hardware accelerated raw-mode:
1496 *
1497 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
1498 */
1499 if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
1500 {
1501 *piException = EXCP_EXECUTE_HWACC;
1502 return true;
1503 }
1504 return false;
1505 }
1506
1507 /*
1508 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1509 * or 32 bits protected mode ring 0 code
1510 *
1511 * The tests are ordered by the likelihood of being true during normal execution.
1512 */
1513 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1514 {
1515 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1516 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1517 return false;
1518 }
1519
1520#ifndef VBOX_RAW_V86
1521 if (fFlags & VM_MASK) {
1522 STAM_COUNTER_INC(&gStatRefuseVM86);
1523 Log2(("raw mode refused: VM_MASK\n"));
1524 return false;
1525 }
1526#endif
1527
1528 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1529 {
1530#ifndef DEBUG_bird
1531 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1532#endif
1533 return false;
1534 }
1535
1536 if (env->singlestep_enabled)
1537 {
1538 //Log2(("raw mode refused: Single step\n"));
1539 return false;
1540 }
1541
1542 if (env->nb_breakpoints > 0)
1543 {
1544 //Log2(("raw mode refused: Breakpoints\n"));
1545 return false;
1546 }
1547
1548 u32CR0 = env->cr[0];
1549 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1550 {
1551 STAM_COUNTER_INC(&gStatRefusePaging);
1552 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1553 return false;
1554 }
1555
1556 if (env->cr[4] & CR4_PAE_MASK)
1557 {
1558 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1559 {
1560 STAM_COUNTER_INC(&gStatRefusePAE);
1561 return false;
1562 }
1563 }
1564
1565 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1566 {
1567 if (!EMIsRawRing3Enabled(env->pVM))
1568 return false;
1569
1570 if (!(env->eflags & IF_MASK))
1571 {
1572 STAM_COUNTER_INC(&gStatRefuseIF0);
1573 Log2(("raw mode refused: IF (RawR3)\n"));
1574 return false;
1575 }
1576
1577 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1578 {
1579 STAM_COUNTER_INC(&gStatRefuseWP0);
1580 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1581 return false;
1582 }
1583 }
1584 else
1585 {
1586 if (!EMIsRawRing0Enabled(env->pVM))
1587 return false;
1588
1589 // Let's start with pure 32 bits ring 0 code first
1590 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1591 {
1592 STAM_COUNTER_INC(&gStatRefuseCode16);
1593 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1594 return false;
1595 }
1596
1597 // Only R0
1598 if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1599 {
1600 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1601 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1602 return false;
1603 }
1604
1605 if (!(u32CR0 & CR0_WP_MASK))
1606 {
1607 STAM_COUNTER_INC(&gStatRefuseWP0);
1608 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1609 return false;
1610 }
1611
1612 if (PATMIsPatchGCAddr(env->pVM, eip))
1613 {
1614 Log2(("raw r0 mode forced: patch code\n"));
1615 *piException = EXCP_EXECUTE_RAW;
1616 return true;
1617 }
1618
1619#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1620 if (!(env->eflags & IF_MASK))
1621 {
1622 STAM_COUNTER_INC(&gStatRefuseIF0);
1623 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1624 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1625 return false;
1626 }
1627#endif
1628
1629 env->state |= CPU_RAW_RING0;
1630 }
1631
1632 /*
1633 * Don't reschedule the first time we're called, because there might be
1634 * special reasons why we're here that is not covered by the above checks.
1635 */
1636 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1637 {
1638 Log2(("raw mode refused: first scheduling\n"));
1639 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1640 return false;
1641 }
1642
1643 Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
1644 *piException = EXCP_EXECUTE_RAW;
1645 return true;
1646}
1647
1648
1649/**
1650 * Fetches a code byte.
1651 *
1652 * @returns Success indicator (bool) for ease of use.
1653 * @param env The CPU environment structure.
1654 * @param GCPtrInstr Where to fetch code.
1655 * @param pu8Byte Where to store the byte on success
1656 */
1657bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1658{
1659 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1660 if (RT_SUCCESS(rc))
1661 return true;
1662 return false;
1663}
1664
1665
1666/**
1667 * Flush (or invalidate if you like) page table/dir entry.
1668 *
1669 * (invlpg instruction; tlb_flush_page)
1670 *
1671 * @param env Pointer to cpu environment.
1672 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1673 */
1674void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
1675{
1676 PVM pVM = env->pVM;
1677 PCPUMCTX pCtx;
1678 int rc;
1679
1680 /*
1681 * When we're replaying invlpg instructions or restoring a saved
1682 * state we disable this path.
1683 */
1684 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
1685 return;
1686 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1687 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1688
1689 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1690
1691 /*
1692 * Update the control registers before calling PGMFlushPage.
1693 */
1694 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1695 Assert(pCtx);
1696 pCtx->cr0 = env->cr[0];
1697 pCtx->cr3 = env->cr[3];
1698 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1699 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1700 pCtx->cr4 = env->cr[4];
1701
1702 /*
1703 * Let PGM do the rest.
1704 */
1705 Assert(env->pVCpu);
1706 rc = PGMInvalidatePage(env->pVCpu, GCPtr);
1707 if (RT_FAILURE(rc))
1708 {
1709 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1710 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1711 }
1712 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1713}
1714
1715
#ifndef REM_PHYS_ADDR_IN_TLB
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Translates a guest-physical address into a host pointer for the QEMU TLB,
 * encoding status in the low pointer bits: the literal value 1 means the
 * translation failed (catch-all / unassigned), and bit 1 set flags a
 * write-monitored page.
 *
 * NOTE(review): the fWritable parameter is ignored; the call below always
 * requests a writable mapping and signals write-monitoring via bit 1 —
 * presumably intentional, but worth confirming against the callers.
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;        /* tag: no usable mapping */
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);  /* tag: writes must be caught */
    return pv;
}
#endif /* REM_PHYS_ADDR_IN_TLB */
1738
1739
1740/**
1741 * Called from tlb_protect_code in order to write monitor a code page.
1742 *
1743 * @param env Pointer to the CPU environment.
1744 * @param GCPtr Code page to monitor
1745 */
1746void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1747{
1748#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1749 Assert(env->pVM->rem.s.fInREM);
1750 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1751 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1752 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1753 && !(env->eflags & VM_MASK) /* no V86 mode */
1754 && !HWACCMIsEnabled(env->pVM))
1755 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1756#endif
1757}
1758
1759
1760/**
1761 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1762 *
1763 * @param env Pointer to the CPU environment.
1764 * @param GCPtr Code page to monitor
1765 */
1766void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1767{
1768 Assert(env->pVM->rem.s.fInREM);
1769#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1770 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1771 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1772 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1773 && !(env->eflags & VM_MASK) /* no V86 mode */
1774 && !HWACCMIsEnabled(env->pVM))
1775 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1776#endif
1777}
1778
1779
1780/**
1781 * Called when the CPU is initialized, any of the CRx registers are changed or
1782 * when the A20 line is modified.
1783 *
1784 * @param env Pointer to the CPU environment.
1785 * @param fGlobal Set if the flush is global.
1786 */
1787void remR3FlushTLB(CPUState *env, bool fGlobal)
1788{
1789 PVM pVM = env->pVM;
1790 PCPUMCTX pCtx;
1791
1792 /*
1793 * When we're replaying invlpg instructions or restoring a saved
1794 * state we disable this path.
1795 */
1796 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1797 return;
1798 Assert(pVM->rem.s.fInREM);
1799
1800 /*
1801 * The caller doesn't check cr4, so we have to do that for ourselves.
1802 */
1803 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1804 fGlobal = true;
1805 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1806
1807 /*
1808 * Update the control registers before calling PGMR3FlushTLB.
1809 */
1810 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1811 Assert(pCtx);
1812 pCtx->cr0 = env->cr[0];
1813 pCtx->cr3 = env->cr[3];
1814 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1815 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1816 pCtx->cr4 = env->cr[4];
1817
1818 /*
1819 * Let PGM do the rest.
1820 */
1821 Assert(env->pVCpu);
1822 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1823}
1824
1825
1826/**
1827 * Called when any of the cr0, cr4 or efer registers is updated.
1828 *
1829 * @param env Pointer to the CPU environment.
1830 */
1831void remR3ChangeCpuMode(CPUState *env)
1832{
1833 PVM pVM = env->pVM;
1834 uint64_t efer;
1835 PCPUMCTX pCtx;
1836 int rc;
1837
1838 /*
1839 * When we're replaying loads or restoring a saved
1840 * state this path is disabled.
1841 */
1842 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
1843 return;
1844 Assert(pVM->rem.s.fInREM);
1845
1846 /*
1847 * Update the control registers before calling PGMChangeMode()
1848 * as it may need to map whatever cr3 is pointing to.
1849 */
1850 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1851 Assert(pCtx);
1852 pCtx->cr0 = env->cr[0];
1853 pCtx->cr3 = env->cr[3];
1854 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1855 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1856 pCtx->cr4 = env->cr[4];
1857
1858#ifdef TARGET_X86_64
1859 efer = env->efer;
1860#else
1861 efer = 0;
1862#endif
1863 Assert(env->pVCpu);
1864 rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
1865 if (rc != VINF_SUCCESS)
1866 {
1867 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1868 {
1869 Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
1870 remR3RaiseRC(env->pVM, rc);
1871 }
1872 else
1873 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
1874 }
1875}
1876
1877
1878/**
1879 * Called from compiled code to run dma.
1880 *
1881 * @param env Pointer to the CPU environment.
1882 */
1883void remR3DmaRun(CPUState *env)
1884{
1885 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1886 PDMR3DmaRun(env->pVM);
1887 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1888}
1889
1890
1891/**
1892 * Called from compiled code to schedule pending timers in VMM
1893 *
1894 * @param env Pointer to the CPU environment.
1895 */
1896void remR3TimersRun(CPUState *env)
1897{
1898 LogFlow(("remR3TimersRun:\n"));
1899 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
1900 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1901 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1902 TMR3TimerQueuesDo(env->pVM);
1903 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1904 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1905}
1906
1907
1908/**
1909 * Record trap occurrence
1910 *
1911 * @returns VBox status code
1912 * @param env Pointer to the CPU environment.
1913 * @param uTrap Trap nr
1914 * @param uErrorCode Error code
1915 * @param pvNextEIP Next EIP
1916 */
1917int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1918{
1919 PVM pVM = env->pVM;
1920#ifdef VBOX_WITH_STATISTICS
1921 static STAMCOUNTER s_aStatTrap[255];
1922 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1923#endif
1924
1925#ifdef VBOX_WITH_STATISTICS
1926 if (uTrap < 255)
1927 {
1928 if (!s_aRegisters[uTrap])
1929 {
1930 char szStatName[64];
1931 s_aRegisters[uTrap] = true;
1932 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1933 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1934 }
1935 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1936 }
1937#endif
1938 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1939 if( uTrap < 0x20
1940 && (env->cr[0] & X86_CR0_PE)
1941 && !(env->eflags & X86_EFL_VM))
1942 {
1943#ifdef DEBUG
1944 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1945#endif
1946 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1947 {
1948 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1949 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1950 return VERR_REM_TOO_MANY_TRAPS;
1951 }
1952 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1953 pVM->rem.s.cPendingExceptions = 1;
1954 pVM->rem.s.uPendingException = uTrap;
1955 pVM->rem.s.uPendingExcptEIP = env->eip;
1956 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1957 }
1958 else
1959 {
1960 pVM->rem.s.cPendingExceptions = 0;
1961 pVM->rem.s.uPendingException = uTrap;
1962 pVM->rem.s.uPendingExcptEIP = env->eip;
1963 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1964 }
1965 return VINF_SUCCESS;
1966}
1967
1968
1969/*
1970 * Clear current active trap
1971 *
1972 * @param pVM VM Handle.
1973 */
1974void remR3TrapClear(PVM pVM)
1975{
1976 pVM->rem.s.cPendingExceptions = 0;
1977 pVM->rem.s.uPendingException = 0;
1978 pVM->rem.s.uPendingExcptEIP = 0;
1979 pVM->rem.s.uPendingExcptCR2 = 0;
1980}
1981
1982
1983/*
1984 * Record previous call instruction addresses
1985 *
1986 * @param env Pointer to the CPU environment.
1987 */
1988void remR3RecordCall(CPUState *env)
1989{
1990 CSAMR3RecordCallAddress(env->pVM, env->eip);
1991}
1992
1993
1994/**
1995 * Syncs the internal REM state with the VM.
1996 *
1997 * This must be called before REMR3Run() is invoked whenever when the REM
1998 * state is not up to date. Calling it several times in a row is not
1999 * permitted.
2000 *
2001 * @returns VBox status code.
2002 *
2003 * @param pVM VM Handle.
2004 * @param pVCpu VMCPU Handle.
2005 *
2006 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
2007 * no do this since the majority of the callers don't want any unnecessary of events
2008 * pending that would immediately interrupt execution.
2009 */
2010REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
2011{
2012 register const CPUMCTX *pCtx;
2013 register unsigned fFlags;
2014 bool fHiddenSelRegsValid;
2015 unsigned i;
2016 TRPMEVENT enmType;
2017 uint8_t u8TrapNo;
2018 uint32_t uCpl;
2019 int rc;
2020
2021 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
2022 Log2(("REMR3State:\n"));
2023
2024 pVM->rem.s.Env.pVCpu = pVCpu;
2025 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
2026 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVCpu); /// @todo move this down and use fFlags.
2027
2028 Assert(!pVM->rem.s.fInREM);
2029 pVM->rem.s.fInStateSync = true;
2030
2031 /*
2032 * If we have to flush TBs, do that immediately.
2033 */
2034 if (pVM->rem.s.fFlushTBs)
2035 {
2036 STAM_COUNTER_INC(&gStatFlushTBs);
2037 tb_flush(&pVM->rem.s.Env);
2038 pVM->rem.s.fFlushTBs = false;
2039 }
2040
2041 /*
2042 * Copy the registers which require no special handling.
2043 */
2044#ifdef TARGET_X86_64
2045 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
2046 Assert(R_EAX == 0);
2047 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
2048 Assert(R_ECX == 1);
2049 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
2050 Assert(R_EDX == 2);
2051 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
2052 Assert(R_EBX == 3);
2053 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
2054 Assert(R_ESP == 4);
2055 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
2056 Assert(R_EBP == 5);
2057 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
2058 Assert(R_ESI == 6);
2059 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
2060 Assert(R_EDI == 7);
2061 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
2062 pVM->rem.s.Env.regs[8] = pCtx->r8;
2063 pVM->rem.s.Env.regs[9] = pCtx->r9;
2064 pVM->rem.s.Env.regs[10] = pCtx->r10;
2065 pVM->rem.s.Env.regs[11] = pCtx->r11;
2066 pVM->rem.s.Env.regs[12] = pCtx->r12;
2067 pVM->rem.s.Env.regs[13] = pCtx->r13;
2068 pVM->rem.s.Env.regs[14] = pCtx->r14;
2069 pVM->rem.s.Env.regs[15] = pCtx->r15;
2070
2071 pVM->rem.s.Env.eip = pCtx->rip;
2072
2073 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
2074#else
2075 Assert(R_EAX == 0);
2076 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
2077 Assert(R_ECX == 1);
2078 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
2079 Assert(R_EDX == 2);
2080 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
2081 Assert(R_EBX == 3);
2082 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
2083 Assert(R_ESP == 4);
2084 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
2085 Assert(R_EBP == 5);
2086 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
2087 Assert(R_ESI == 6);
2088 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
2089 Assert(R_EDI == 7);
2090 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
2091 pVM->rem.s.Env.eip = pCtx->eip;
2092
2093 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
2094#endif
2095
2096 pVM->rem.s.Env.cr[2] = pCtx->cr2;
2097
2098 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
2099 for (i=0;i<8;i++)
2100 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
2101
2102 /*
2103 * Clear the halted hidden flag (the interrupt waking up the CPU can
2104 * have been dispatched in raw mode).
2105 */
2106 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
2107
2108 /*
2109 * Replay invlpg?
2110 */
2111 if (pVM->rem.s.cInvalidatedPages)
2112 {
2113 RTUINT i;
2114
2115 pVM->rem.s.fIgnoreInvlPg = true;
2116 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2117 {
2118 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2119 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2120 }
2121 pVM->rem.s.fIgnoreInvlPg = false;
2122 pVM->rem.s.cInvalidatedPages = 0;
2123 }
2124
2125 /* Replay notification changes. */
2126 REMR3ReplayHandlerNotifications(pVM);
2127
2128 /* Update MSRs; before CRx registers! */
2129 pVM->rem.s.Env.efer = pCtx->msrEFER;
2130 pVM->rem.s.Env.star = pCtx->msrSTAR;
2131 pVM->rem.s.Env.pat = pCtx->msrPAT;
2132#ifdef TARGET_X86_64
2133 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
2134 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
2135 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
2136 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
2137
2138 /* Update the internal long mode activate flag according to the new EFER value. */
2139 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2140 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
2141 else
2142 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
2143#endif
2144
2145 /*
2146 * Registers which are rarely changed and require special handling / order when changed.
2147 */
2148 fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
2149 LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
2150 if (fFlags & ( CPUM_CHANGED_GLOBAL_TLB_FLUSH
2151 | CPUM_CHANGED_CR4
2152 | CPUM_CHANGED_CR0
2153 | CPUM_CHANGED_CR3
2154 | CPUM_CHANGED_GDTR
2155 | CPUM_CHANGED_IDTR
2156 | CPUM_CHANGED_SYSENTER_MSR
2157 | CPUM_CHANGED_LDTR
2158 | CPUM_CHANGED_CPUID
2159 | CPUM_CHANGED_FPU_REM
2160 )
2161 )
2162 {
2163 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
2164 {
2165 pVM->rem.s.fIgnoreCR3Load = true;
2166 tlb_flush(&pVM->rem.s.Env, true);
2167 pVM->rem.s.fIgnoreCR3Load = false;
2168 }
2169
2170 /* CR4 before CR0! */
2171 if (fFlags & CPUM_CHANGED_CR4)
2172 {
2173 pVM->rem.s.fIgnoreCR3Load = true;
2174 pVM->rem.s.fIgnoreCpuMode = true;
2175 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
2176 pVM->rem.s.fIgnoreCpuMode = false;
2177 pVM->rem.s.fIgnoreCR3Load = false;
2178 }
2179
2180 if (fFlags & CPUM_CHANGED_CR0)
2181 {
2182 pVM->rem.s.fIgnoreCR3Load = true;
2183 pVM->rem.s.fIgnoreCpuMode = true;
2184 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
2185 pVM->rem.s.fIgnoreCpuMode = false;
2186 pVM->rem.s.fIgnoreCR3Load = false;
2187 }
2188
2189 if (fFlags & CPUM_CHANGED_CR3)
2190 {
2191 pVM->rem.s.fIgnoreCR3Load = true;
2192 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
2193 pVM->rem.s.fIgnoreCR3Load = false;
2194 }
2195
2196 if (fFlags & CPUM_CHANGED_GDTR)
2197 {
2198 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
2199 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
2200 }
2201
2202 if (fFlags & CPUM_CHANGED_IDTR)
2203 {
2204 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
2205 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
2206 }
2207
2208 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
2209 {
2210 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
2211 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
2212 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
2213 }
2214
2215 if (fFlags & CPUM_CHANGED_LDTR)
2216 {
2217 if (fHiddenSelRegsValid)
2218 {
2219 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
2220 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
2221 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
2222 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
2223 }
2224 else
2225 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
2226 }
2227
2228 if (fFlags & CPUM_CHANGED_CPUID)
2229 {
2230 uint32_t u32Dummy;
2231
2232 /*
2233 * Get the CPUID features.
2234 */
2235 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2236 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2237 }
2238
2239 /* Sync FPU state after CR4, CPUID and EFER (!). */
2240 if (fFlags & CPUM_CHANGED_FPU_REM)
2241 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
2242 }
2243
2244 /*
2245 * Sync TR unconditionally to make life simpler.
2246 */
2247 pVM->rem.s.Env.tr.selector = pCtx->tr;
2248 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
2249 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
2250 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
2251 /* Note! do_interrupt will fault if the busy flag is still set... */
2252 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2253
2254 /*
2255 * Update selector registers.
2256 * This must be done *after* we've synced gdt, ldt and crX registers
2257 * since we're reading the GDT/LDT om sync_seg. This will happen with
2258 * saved state which takes a quick dip into rawmode for instance.
2259 */
2260 /*
2261 * Stack; Note first check this one as the CPL might have changed. The
2262 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
2263 */
2264
2265 if (fHiddenSelRegsValid)
2266 {
2267 /* The hidden selector registers are valid in the CPU context. */
2268 /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2269
2270 /* Set current CPL */
2271 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2272
2273 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
2274 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
2275 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
2276 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
2277 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
2278 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
2279 }
2280 else
2281 {
2282 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
2283 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
2284 {
2285 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
2286
2287 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2288 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
2289#ifdef VBOX_WITH_STATISTICS
2290 if (pVM->rem.s.Env.segs[R_SS].newselector)
2291 {
2292 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
2293 }
2294#endif
2295 }
2296 else
2297 pVM->rem.s.Env.segs[R_SS].newselector = 0;
2298
2299 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
2300 {
2301 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
2302 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
2303#ifdef VBOX_WITH_STATISTICS
2304 if (pVM->rem.s.Env.segs[R_ES].newselector)
2305 {
2306 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
2307 }
2308#endif
2309 }
2310 else
2311 pVM->rem.s.Env.segs[R_ES].newselector = 0;
2312
2313 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
2314 {
2315 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
2316 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
2317#ifdef VBOX_WITH_STATISTICS
2318 if (pVM->rem.s.Env.segs[R_CS].newselector)
2319 {
2320 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
2321 }
2322#endif
2323 }
2324 else
2325 pVM->rem.s.Env.segs[R_CS].newselector = 0;
2326
2327 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
2328 {
2329 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
2330 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
2331#ifdef VBOX_WITH_STATISTICS
2332 if (pVM->rem.s.Env.segs[R_DS].newselector)
2333 {
2334 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
2335 }
2336#endif
2337 }
2338 else
2339 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2340
2341 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2342 * be the same but not the base/limit. */
2343 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2344 {
2345 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2346 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2347#ifdef VBOX_WITH_STATISTICS
2348 if (pVM->rem.s.Env.segs[R_FS].newselector)
2349 {
2350 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2351 }
2352#endif
2353 }
2354 else
2355 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2356
2357 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2358 {
2359 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2360 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2361#ifdef VBOX_WITH_STATISTICS
2362 if (pVM->rem.s.Env.segs[R_GS].newselector)
2363 {
2364 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2365 }
2366#endif
2367 }
2368 else
2369 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2370 }
2371
2372 /*
2373 * Check for traps.
2374 */
2375 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2376 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2377 if (RT_SUCCESS(rc))
2378 {
2379#ifdef DEBUG
2380 if (u8TrapNo == 0x80)
2381 {
2382 remR3DumpLnxSyscall(pVCpu);
2383 remR3DumpOBsdSyscall(pVCpu);
2384 }
2385#endif
2386
2387 pVM->rem.s.Env.exception_index = u8TrapNo;
2388 if (enmType != TRPM_SOFTWARE_INT)
2389 {
2390 pVM->rem.s.Env.exception_is_int = 0;
2391 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2392 }
2393 else
2394 {
2395 /*
2396 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
2397 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
2398 * for int03 and into.
2399 */
2400 pVM->rem.s.Env.exception_is_int = 1;
2401 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2402 /* int 3 may be generated by one-byte 0xcc */
2403 if (u8TrapNo == 3)
2404 {
2405 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2406 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2407 }
2408 /* int 4 may be generated by one-byte 0xce */
2409 else if (u8TrapNo == 4)
2410 {
2411 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2412 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2413 }
2414 }
2415
2416 /* get error code and cr2 if needed. */
2417 switch (u8TrapNo)
2418 {
2419 case 0x0e:
2420 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2421 /* fallthru */
2422 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2423 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2424 break;
2425
2426 case 0x11: case 0x08:
2427 default:
2428 pVM->rem.s.Env.error_code = 0;
2429 break;
2430 }
2431
2432 /*
2433 * We can now reset the active trap since the recompiler is gonna have a go at it.
2434 */
2435 rc = TRPMResetTrap(pVCpu);
2436 AssertRC(rc);
2437 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2438 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2439 }
2440
2441 /*
2442 * Clear old interrupt request flags; Check for pending hardware interrupts.
2443 * (See @remark for why we don't check for other FFs.)
2444 */
2445 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2446 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2447 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2448 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2449
2450 /*
2451 * We're now in REM mode.
2452 */
2453 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2454 pVM->rem.s.fInREM = true;
2455 pVM->rem.s.fInStateSync = false;
2456 pVM->rem.s.cCanExecuteRaw = 0;
2457 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2458 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2459 return VINF_SUCCESS;
2460}
2461
2462
2463/**
2464 * Syncs back changes in the REM state to the the VM state.
2465 *
2466 * This must be called after invoking REMR3Run().
2467 * Calling it several times in a row is not permitted.
2468 *
2469 * @returns VBox status code.
2470 *
2471 * @param pVM VM Handle.
2472 * @param pVCpu VMCPU Handle.
2473 */
2474REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
2475{
2476 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2477 Assert(pCtx);
2478 unsigned i;
2479
2480 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2481 Log2(("REMR3StateBack:\n"));
2482 Assert(pVM->rem.s.fInREM);
2483
2484 /*
2485 * Copy back the registers.
2486 * This is done in the order they are declared in the CPUMCTX structure.
2487 */
2488
2489 /** @todo FOP */
2490 /** @todo FPUIP */
2491 /** @todo CS */
2492 /** @todo FPUDP */
2493 /** @todo DS */
2494
2495 /** @todo check if FPU/XMM was actually used in the recompiler */
2496 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2497//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2498
2499#ifdef TARGET_X86_64
2500 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
2501 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2502 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2503 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2504 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2505 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2506 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2507 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2508 pCtx->r8 = pVM->rem.s.Env.regs[8];
2509 pCtx->r9 = pVM->rem.s.Env.regs[9];
2510 pCtx->r10 = pVM->rem.s.Env.regs[10];
2511 pCtx->r11 = pVM->rem.s.Env.regs[11];
2512 pCtx->r12 = pVM->rem.s.Env.regs[12];
2513 pCtx->r13 = pVM->rem.s.Env.regs[13];
2514 pCtx->r14 = pVM->rem.s.Env.regs[14];
2515 pCtx->r15 = pVM->rem.s.Env.regs[15];
2516
2517 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2518
2519#else
2520 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2521 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2522 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2523 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2524 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2525 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2526 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2527
2528 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2529#endif
2530
2531 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2532
2533#ifdef VBOX_WITH_STATISTICS
2534 if (pVM->rem.s.Env.segs[R_SS].newselector)
2535 {
2536 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
2537 }
2538 if (pVM->rem.s.Env.segs[R_GS].newselector)
2539 {
2540 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
2541 }
2542 if (pVM->rem.s.Env.segs[R_FS].newselector)
2543 {
2544 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
2545 }
2546 if (pVM->rem.s.Env.segs[R_ES].newselector)
2547 {
2548 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
2549 }
2550 if (pVM->rem.s.Env.segs[R_DS].newselector)
2551 {
2552 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
2553 }
2554 if (pVM->rem.s.Env.segs[R_CS].newselector)
2555 {
2556 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
2557 }
2558#endif
2559 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2560 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2561 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2562 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2563 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2564
2565#ifdef TARGET_X86_64
2566 pCtx->rip = pVM->rem.s.Env.eip;
2567 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2568#else
2569 pCtx->eip = pVM->rem.s.Env.eip;
2570 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2571#endif
2572
2573 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2574 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2575 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2576 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2577 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2578 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2579
2580 for (i = 0; i < 8; i++)
2581 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2582
2583 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2584 if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2585 {
2586 pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
2587 STAM_COUNTER_INC(&gStatREMGDTChange);
2588 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2589 }
2590
2591 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2592 if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2593 {
2594 pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
2595 STAM_COUNTER_INC(&gStatREMIDTChange);
2596 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2597 }
2598
2599 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2600 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2601 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2602 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2603 {
2604 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2605 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2606 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2607 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
2608 STAM_COUNTER_INC(&gStatREMLDTRChange);
2609 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2610 }
2611
2612 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2613 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2614 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2615 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2616 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2617 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2618 : 0) )
2619 {
2620 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2621 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2622 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2623 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2624 pCtx->tr = pVM->rem.s.Env.tr.selector;
2625 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2626 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2627 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2628 if (pCtx->trHid.Attr.u)
2629 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2630 STAM_COUNTER_INC(&gStatREMTRChange);
2631 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2632 }
2633
2634 /** @todo These values could still be out of sync! */
2635 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2636 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2637 /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2638 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;
2639
2640 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2641 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2642 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;
2643
2644 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2645 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2646 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;
2647
2648 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2649 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2650 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;
2651
2652 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2653 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2654 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;
2655
2656 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2657 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2658 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;
2659
2660 /* Sysenter MSR */
2661 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2662 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2663 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2664
2665 /* System MSRs. */
2666 pCtx->msrEFER = pVM->rem.s.Env.efer;
2667 pCtx->msrSTAR = pVM->rem.s.Env.star;
2668 pCtx->msrPAT = pVM->rem.s.Env.pat;
2669#ifdef TARGET_X86_64
2670 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2671 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2672 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2673 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2674#endif
2675
2676 remR3TrapClear(pVM);
2677
2678 /*
2679 * Check for traps.
2680 */
2681 if ( pVM->rem.s.Env.exception_index >= 0
2682 && pVM->rem.s.Env.exception_index < 256)
2683 {
2684 int rc;
2685
2686 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2687 rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
2688 AssertRC(rc);
2689 switch (pVM->rem.s.Env.exception_index)
2690 {
2691 case 0x0e:
2692 TRPMSetFaultAddress(pVCpu, pCtx->cr2);
2693 /* fallthru */
2694 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2695 case 0x11: case 0x08: /* 0 */
2696 TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
2697 break;
2698 }
2699
2700 }
2701
2702 /*
2703 * We're not longer in REM mode.
2704 */
2705 CPUMR3RemLeave(pVCpu,
2706 HWACCMIsEnabled(pVM)
2707 || ( pVM->rem.s.Env.segs[R_SS].newselector
2708 | pVM->rem.s.Env.segs[R_GS].newselector
2709 | pVM->rem.s.Env.segs[R_FS].newselector
2710 | pVM->rem.s.Env.segs[R_ES].newselector
2711 | pVM->rem.s.Env.segs[R_DS].newselector
2712 | pVM->rem.s.Env.segs[R_CS].newselector) == 0
2713 );
2714 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
2715 pVM->rem.s.fInREM = false;
2716 pVM->rem.s.pCtx = NULL;
2717 pVM->rem.s.Env.pVCpu = NULL;
2718 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2719 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2720 return VINF_SUCCESS;
2721}
2722
2723
2724/**
2725 * This is called by the disassembler when it wants to update the cpu state
2726 * before for instance doing a register dump.
2727 */
2728static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2729{
2730 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2731 unsigned i;
2732
2733 Assert(pVM->rem.s.fInREM);
2734
2735 /*
2736 * Copy back the registers.
2737 * This is done in the order they are declared in the CPUMCTX structure.
2738 */
2739
2740 /** @todo FOP */
2741 /** @todo FPUIP */
2742 /** @todo CS */
2743 /** @todo FPUDP */
2744 /** @todo DS */
2745 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2746 pCtx->fpu.MXCSR = 0;
2747 pCtx->fpu.MXCSR_MASK = 0;
2748
2749 /** @todo check if FPU/XMM was actually used in the recompiler */
2750 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2751//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2752
2753#ifdef TARGET_X86_64
2754 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2755 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2756 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2757 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2758 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2759 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2760 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2761 pCtx->r8 = pVM->rem.s.Env.regs[8];
2762 pCtx->r9 = pVM->rem.s.Env.regs[9];
2763 pCtx->r10 = pVM->rem.s.Env.regs[10];
2764 pCtx->r11 = pVM->rem.s.Env.regs[11];
2765 pCtx->r12 = pVM->rem.s.Env.regs[12];
2766 pCtx->r13 = pVM->rem.s.Env.regs[13];
2767 pCtx->r14 = pVM->rem.s.Env.regs[14];
2768 pCtx->r15 = pVM->rem.s.Env.regs[15];
2769
2770 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2771#else
2772 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2773 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2774 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2775 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2776 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2777 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2778 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2779
2780 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2781#endif
2782
2783 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2784
2785 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2786 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2787 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2788 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2789 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2790
2791#ifdef TARGET_X86_64
2792 pCtx->rip = pVM->rem.s.Env.eip;
2793 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2794#else
2795 pCtx->eip = pVM->rem.s.Env.eip;
2796 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2797#endif
2798
2799 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2800 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2801 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2802 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2803 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2804 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2805
2806 for (i = 0; i < 8; i++)
2807 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2808
2809 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2810 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2811 {
2812 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2813 STAM_COUNTER_INC(&gStatREMGDTChange);
2814 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2815 }
2816
2817 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2818 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2819 {
2820 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2821 STAM_COUNTER_INC(&gStatREMIDTChange);
2822 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2823 }
2824
2825 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2826 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2827 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2828 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2829 {
2830 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2831 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2832 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2833 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2834 STAM_COUNTER_INC(&gStatREMLDTRChange);
2835 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2836 }
2837
2838 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2839 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2840 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2841 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2842 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2843 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2844 : 0) )
2845 {
2846 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2847 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2848 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2849 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2850 pCtx->tr = pVM->rem.s.Env.tr.selector;
2851 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2852 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2853 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2854 if (pCtx->trHid.Attr.u)
2855 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2856 STAM_COUNTER_INC(&gStatREMTRChange);
2857 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2858 }
2859
2860 /** @todo These values could still be out of sync! */
2861 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2862 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2863 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2864 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2865
2866 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2867 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2868 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2869
2870 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2871 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2872 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2873
2874 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2875 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2876 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2877
2878 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2879 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2880 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2881
2882 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2883 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2884 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2885
2886 /* Sysenter MSR */
2887 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2888 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2889 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2890
2891 /* System MSRs. */
2892 pCtx->msrEFER = pVM->rem.s.Env.efer;
2893 pCtx->msrSTAR = pVM->rem.s.Env.star;
2894 pCtx->msrPAT = pVM->rem.s.Env.pat;
2895#ifdef TARGET_X86_64
2896 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2897 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2898 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2899 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2900#endif
2901
2902}
2903
2904
2905/**
2906 * Update the VMM state information if we're currently in REM.
2907 *
2908 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2909 * we're currently executing in REM and the VMM state is invalid. This method will of
2910 * course check that we're executing in REM before syncing any data over to the VMM.
2911 *
2912 * @param pVM The VM handle.
2913 * @param pVCpu The VMCPU handle.
2914 */
2915REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2916{
2917 if (pVM->rem.s.fInREM)
2918 remR3StateUpdate(pVM, pVCpu);
2919}
2920
2921
2922#undef LOG_GROUP
2923#define LOG_GROUP LOG_GROUP_REM
2924
2925
2926/**
2927 * Notify the recompiler about Address Gate 20 state change.
2928 *
2929 * This notification is required since A20 gate changes are
2930 * initialized from a device driver and the VM might just as
2931 * well be in REM mode as in RAW mode.
2932 *
2933 * @param pVM VM handle.
2934 * @param pVCpu VMCPU handle.
2935 * @param fEnable True if the gate should be enabled.
2936 * False if the gate should be disabled.
2937 */
2938REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
2939{
2940 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2941 VM_ASSERT_EMT(pVM);
2942
2943 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2944 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2945 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2946}
2947
2948
2949/**
2950 * Replays the handler notification changes
2951 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2952 *
2953 * @param pVM VM handle.
2954 */
2955REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2956{
2957 /*
2958 * Replay the flushes.
2959 */
2960 LogFlow(("REMR3ReplayHandlerNotifications:\n"));
2961 VM_ASSERT_EMT(pVM);
2962
2963 /** @todo this isn't ensuring correct replay order. */
2964 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
2965 {
2966 uint32_t idxNext;
2967 uint32_t idxRevHead;
2968 uint32_t idxHead;
2969#ifdef VBOX_STRICT
2970 int32_t c = 0;
2971#endif
2972
2973 /* Lockless purging of pending notifications. */
2974 idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
2975 if (idxHead == UINT32_MAX)
2976 return;
2977 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
2978
2979 /*
2980 * Reverse the list to process it in FIFO order.
2981 */
2982 idxRevHead = UINT32_MAX;
2983 do
2984 {
2985 /* Save the index of the next rec. */
2986 idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
2987 Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
2988 /* Push the record onto the reversed list. */
2989 pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
2990 idxRevHead = idxHead;
2991 Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
2992 /* Advance. */
2993 idxHead = idxNext;
2994 } while (idxHead != UINT32_MAX);
2995
2996 /*
2997 * Loop thru the list, reinserting the record into the free list as they are
2998 * processed to avoid having other EMTs running out of entries while we're flushing.
2999 */
3000 idxHead = idxRevHead;
3001 do
3002 {
3003 PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
3004 uint32_t idxCur;
3005 Assert(--c >= 0);
3006
3007 switch (pCur->enmKind)
3008 {
3009 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
3010 remR3NotifyHandlerPhysicalRegister(pVM,
3011 pCur->u.PhysicalRegister.enmType,
3012 pCur->u.PhysicalRegister.GCPhys,
3013 pCur->u.PhysicalRegister.cb,
3014 pCur->u.PhysicalRegister.fHasHCHandler);
3015 break;
3016
3017 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
3018 remR3NotifyHandlerPhysicalDeregister(pVM,
3019 pCur->u.PhysicalDeregister.enmType,
3020 pCur->u.PhysicalDeregister.GCPhys,
3021 pCur->u.PhysicalDeregister.cb,
3022 pCur->u.PhysicalDeregister.fHasHCHandler,
3023 pCur->u.PhysicalDeregister.fRestoreAsRAM);
3024 break;
3025
3026 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
3027 remR3NotifyHandlerPhysicalModify(pVM,
3028 pCur->u.PhysicalModify.enmType,
3029 pCur->u.PhysicalModify.GCPhysOld,
3030 pCur->u.PhysicalModify.GCPhysNew,
3031 pCur->u.PhysicalModify.cb,
3032 pCur->u.PhysicalModify.fHasHCHandler,
3033 pCur->u.PhysicalModify.fRestoreAsRAM);
3034 break;
3035
3036 default:
3037 AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
3038 break;
3039 }
3040
3041 /*
3042 * Advance idxHead.
3043 */
3044 idxCur = idxHead;
3045 idxHead = pCur->idxNext;
3046 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));
3047
3048 /*
3049 * Put the record back into the free list.
3050 */
3051 do
3052 {
3053 idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
3054 ASMAtomicWriteU32(&pCur->idxNext, idxNext);
3055 ASMCompilerBarrier();
3056 } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
3057 } while (idxHead != UINT32_MAX);
3058
3059#ifdef VBOX_STRICT
3060 if (pVM->cCpus == 1)
3061 {
3062 unsigned c;
3063 /* Check that all records are now on the free list. */
3064 for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
3065 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
3066 c++;
3067 AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
3068 }
3069#endif
3070 }
3071}
3072
3073
3074/**
3075 * Notify REM about changed code page.
3076 *
3077 * @returns VBox status code.
3078 * @param pVM VM handle.
3079 * @param pVCpu VMCPU handle.
3080 * @param pvCodePage Code page address
3081 */
3082REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
3083{
3084#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
3085 int rc;
3086 RTGCPHYS PhysGC;
3087 uint64_t flags;
3088
3089 VM_ASSERT_EMT(pVM);
3090
3091 /*
3092 * Get the physical page address.
3093 */
3094 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
3095 if (rc == VINF_SUCCESS)
3096 {
3097 /*
3098 * Sync the required registers and flush the whole page.
3099 * (Easier to do the whole page than notifying it about each physical
3100 * byte that was changed.
3101 */
3102 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
3103 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
3104 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
3105 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
3106
3107 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
3108 }
3109#endif
3110 return VINF_SUCCESS;
3111}
3112
3113
3114/**
3115 * Notification about a successful MMR3PhysRegister() call.
3116 *
3117 * @param pVM VM handle.
3118 * @param GCPhys The physical address the RAM.
3119 * @param cb Size of the memory.
3120 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3121 */
3122REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3123{
3124 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3125 VM_ASSERT_EMT(pVM);
3126
3127 /*
3128 * Validate input - we trust the caller.
3129 */
3130 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3131 Assert(cb);
3132 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3133 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
3134
3135 /*
3136 * Base ram? Update GCPhysLastRam.
3137 */
3138 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3139 {
3140 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3141 {
3142 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3143 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3144 }
3145 }
3146
3147 /*
3148 * Register the ram.
3149 */
3150 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3151
3152 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3153 cpu_register_physical_memory(GCPhys, cb, GCPhys);
3154 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3155
3156 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3157}
3158
3159
3160/**
3161 * Notification about a successful MMR3PhysRomRegister() call.
3162 *
3163 * @param pVM VM handle.
3164 * @param GCPhys The physical address of the ROM.
3165 * @param cb The size of the ROM.
3166 * @param pvCopy Pointer to the ROM copy.
3167 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
3168 * This function will be called when ever the protection of the
3169 * shadow ROM changes (at reset and end of POST).
3170 */
3171REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
3172{
3173 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
3174 VM_ASSERT_EMT(pVM);
3175
3176 /*
3177 * Validate input - we trust the caller.
3178 */
3179 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3180 Assert(cb);
3181 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3182
3183 /*
3184 * Register the rom.
3185 */
3186 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3187
3188 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3189 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
3190 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3191
3192 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3193}
3194
3195
3196/**
3197 * Notification about a successful memory deregistration or reservation.
3198 *
3199 * @param pVM VM Handle.
3200 * @param GCPhys Start physical address.
3201 * @param cb The size of the range.
3202 */
3203REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
3204{
3205 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
3206 VM_ASSERT_EMT(pVM);
3207
3208 /*
3209 * Validate input - we trust the caller.
3210 */
3211 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3212 Assert(cb);
3213 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3214
3215 /*
3216 * Unassigning the memory.
3217 */
3218 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3219
3220 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3221 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3222 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3223
3224 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3225}
3226
3227
3228/**
3229 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3230 *
3231 * @param pVM VM Handle.
3232 * @param enmType Handler type.
3233 * @param GCPhys Handler range address.
3234 * @param cb Size of the handler range.
3235 * @param fHasHCHandler Set if the handler has a HC callback function.
3236 *
3237 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3238 * Handler memory type to memory which has no HC handler.
3239 */
3240static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3241{
3242 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3243 enmType, GCPhys, cb, fHasHCHandler));
3244
3245 VM_ASSERT_EMT(pVM);
3246 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3247 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3248
3249
3250 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3251
3252 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3253 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3254 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
3255 else if (fHasHCHandler)
3256 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
3257 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3258
3259 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3260}
3261
3262/**
3263 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3264 *
3265 * @param pVM VM Handle.
3266 * @param enmType Handler type.
3267 * @param GCPhys Handler range address.
3268 * @param cb Size of the handler range.
3269 * @param fHasHCHandler Set if the handler has a HC callback function.
3270 *
3271 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3272 * Handler memory type to memory which has no HC handler.
3273 */
3274REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3275{
3276 REMR3ReplayHandlerNotifications(pVM);
3277
3278 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3279}
3280
3281/**
3282 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3283 *
3284 * @param pVM VM Handle.
3285 * @param enmType Handler type.
3286 * @param GCPhys Handler range address.
3287 * @param cb Size of the handler range.
3288 * @param fHasHCHandler Set if the handler has a HC callback function.
3289 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3290 */
3291static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3292{
3293 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3294 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3295 VM_ASSERT_EMT(pVM);
3296
3297
3298 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3299
3300 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3301 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3302 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3303 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3304 else if (fHasHCHandler)
3305 {
3306 if (!fRestoreAsRAM)
3307 {
3308 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3309 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3310 }
3311 else
3312 {
3313 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3314 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3315 cpu_register_physical_memory(GCPhys, cb, GCPhys);
3316 }
3317 }
3318 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3319
3320 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3321}
3322
3323/**
3324 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3325 *
3326 * @param pVM VM Handle.
3327 * @param enmType Handler type.
3328 * @param GCPhys Handler range address.
3329 * @param cb Size of the handler range.
3330 * @param fHasHCHandler Set if the handler has a HC callback function.
3331 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3332 */
3333REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3334{
3335 REMR3ReplayHandlerNotifications(pVM);
3336 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3337}
3338
3339
3340/**
3341 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3342 *
3343 * @param pVM VM Handle.
3344 * @param enmType Handler type.
3345 * @param GCPhysOld Old handler range address.
3346 * @param GCPhysNew New handler range address.
3347 * @param cb Size of the handler range.
3348 * @param fHasHCHandler Set if the handler has a HC callback function.
3349 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3350 */
3351static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3352{
3353 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3354 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3355 VM_ASSERT_EMT(pVM);
3356 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3357
3358 if (fHasHCHandler)
3359 {
3360 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3361
3362 /*
3363 * Reset the old page.
3364 */
3365 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3366 if (!fRestoreAsRAM)
3367 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
3368 else
3369 {
3370 /* This is not perfect, but it'll do for PD monitoring... */
3371 Assert(cb == PAGE_SIZE);
3372 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3373 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
3374 }
3375
3376 /*
3377 * Update the new page.
3378 */
3379 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3380 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3381 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
3382 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3383
3384 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3385 }
3386}
3387
3388/**
3389 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3390 *
3391 * @param pVM VM Handle.
3392 * @param enmType Handler type.
3393 * @param GCPhysOld Old handler range address.
3394 * @param GCPhysNew New handler range address.
3395 * @param cb Size of the handler range.
3396 * @param fHasHCHandler Set if the handler has a HC callback function.
3397 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3398 */
3399REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3400{
3401 REMR3ReplayHandlerNotifications(pVM);
3402
3403 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3404}
3405
3406/**
3407 * Checks if we're handling access to this page or not.
3408 *
3409 * @returns true if we're trapping access.
3410 * @returns false if we aren't.
3411 * @param pVM The VM handle.
3412 * @param GCPhys The physical address.
3413 *
3414 * @remark This function will only work correctly in VBOX_STRICT builds!
3415 */
3416REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3417{
3418#ifdef VBOX_STRICT
3419 unsigned long off;
3420 REMR3ReplayHandlerNotifications(pVM);
3421
3422 off = get_phys_page_offset(GCPhys);
3423 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3424 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3425 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3426#else
3427 return false;
3428#endif
3429}
3430
3431
3432/**
3433 * Deals with a rare case in get_phys_addr_code where the code
3434 * is being monitored.
3435 *
3436 * It could also be an MMIO page, in which case we will raise a fatal error.
3437 *
3438 * @returns The physical address corresponding to addr.
3439 * @param env The cpu environment.
3440 * @param addr The virtual address.
3441 * @param pTLBEntry The TLB entry.
3442 */
3443target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
3444 target_ulong addr,
3445 CPUTLBEntry* pTLBEntry,
3446 target_phys_addr_t ioTLBEntry)
3447{
3448 PVM pVM = env->pVM;
3449
3450 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3451 {
3452 /* If code memory is being monitored, appropriate IOTLB entry will have
3453 handler IO type, and addend will provide real physical address, no
3454 matter if we store VA in TLB or not, as handlers are always passed PA */
3455 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3456 return ret;
3457 }
3458 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3459 "*** handlers\n",
3460 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3461 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3462 LogRel(("*** mmio\n"));
3463 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3464 LogRel(("*** phys\n"));
3465 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3466 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3467 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3468 AssertFatalFailed();
3469}
3470
3471/**
3472 * Read guest RAM and ROM.
3473 *
3474 * @param SrcGCPhys The source address (guest physical).
3475 * @param pvDst The destination address.
3476 * @param cb Number of bytes
3477 */
3478void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3479{
3480 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3481 VBOX_CHECK_ADDR(SrcGCPhys);
3482 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3483#ifdef VBOX_DEBUG_PHYS
3484 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3485#endif
3486 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3487}
3488
3489
3490/**
3491 * Read guest RAM and ROM, unsigned 8-bit.
3492 *
3493 * @param SrcGCPhys The source address (guest physical).
3494 */
3495RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3496{
3497 uint8_t val;
3498 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3499 VBOX_CHECK_ADDR(SrcGCPhys);
3500 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3501 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3502#ifdef VBOX_DEBUG_PHYS
3503 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3504#endif
3505 return val;
3506}
3507
3508
3509/**
3510 * Read guest RAM and ROM, signed 8-bit.
3511 *
3512 * @param SrcGCPhys The source address (guest physical).
3513 */
3514RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3515{
3516 int8_t val;
3517 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3518 VBOX_CHECK_ADDR(SrcGCPhys);
3519 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3520 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3521#ifdef VBOX_DEBUG_PHYS
3522 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3523#endif
3524 return val;
3525}
3526
3527
3528/**
3529 * Read guest RAM and ROM, unsigned 16-bit.
3530 *
3531 * @param SrcGCPhys The source address (guest physical).
3532 */
3533RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3534{
3535 uint16_t val;
3536 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3537 VBOX_CHECK_ADDR(SrcGCPhys);
3538 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3539 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3540#ifdef VBOX_DEBUG_PHYS
3541 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3542#endif
3543 return val;
3544}
3545
3546
3547/**
3548 * Read guest RAM and ROM, signed 16-bit.
3549 *
3550 * @param SrcGCPhys The source address (guest physical).
3551 */
3552RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3553{
3554 int16_t val;
3555 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3556 VBOX_CHECK_ADDR(SrcGCPhys);
3557 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3558 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3559#ifdef VBOX_DEBUG_PHYS
3560 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3561#endif
3562 return val;
3563}
3564
3565
3566/**
3567 * Read guest RAM and ROM, unsigned 32-bit.
3568 *
3569 * @param SrcGCPhys The source address (guest physical).
3570 */
3571RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3572{
3573 uint32_t val;
3574 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3575 VBOX_CHECK_ADDR(SrcGCPhys);
3576 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3577 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3578#ifdef VBOX_DEBUG_PHYS
3579 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3580#endif
3581 return val;
3582}
3583
3584
3585/**
3586 * Read guest RAM and ROM, signed 32-bit.
3587 *
3588 * @param SrcGCPhys The source address (guest physical).
3589 */
3590RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3591{
3592 int32_t val;
3593 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3594 VBOX_CHECK_ADDR(SrcGCPhys);
3595 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3596 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3597#ifdef VBOX_DEBUG_PHYS
3598 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3599#endif
3600 return val;
3601}
3602
3603
3604/**
3605 * Read guest RAM and ROM, unsigned 64-bit.
3606 *
3607 * @param SrcGCPhys The source address (guest physical).
3608 */
3609uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3610{
3611 uint64_t val;
3612 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3613 VBOX_CHECK_ADDR(SrcGCPhys);
3614 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3615 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3616#ifdef VBOX_DEBUG_PHYS
3617 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3618#endif
3619 return val;
3620}
3621
3622
3623/**
3624 * Read guest RAM and ROM, signed 64-bit.
3625 *
3626 * @param SrcGCPhys The source address (guest physical).
3627 */
3628int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3629{
3630 int64_t val;
3631 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3632 VBOX_CHECK_ADDR(SrcGCPhys);
3633 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3634 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3635#ifdef VBOX_DEBUG_PHYS
3636 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3637#endif
3638 return val;
3639}
3640
3641
3642/**
3643 * Write guest RAM.
3644 *
3645 * @param DstGCPhys The destination address (guest physical).
3646 * @param pvSrc The source address.
3647 * @param cb Number of bytes to write
3648 */
3649void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3650{
3651 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3652 VBOX_CHECK_ADDR(DstGCPhys);
3653 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3654 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3655#ifdef VBOX_DEBUG_PHYS
3656 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3657#endif
3658}
3659
3660
3661/**
3662 * Write guest RAM, unsigned 8-bit.
3663 *
3664 * @param DstGCPhys The destination address (guest physical).
3665 * @param val Value
3666 */
3667void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3668{
3669 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3670 VBOX_CHECK_ADDR(DstGCPhys);
3671 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3672 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3673#ifdef VBOX_DEBUG_PHYS
3674 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3675#endif
3676}
3677
3678
3679/**
3680 * Write guest RAM, unsigned 8-bit.
3681 *
3682 * @param DstGCPhys The destination address (guest physical).
3683 * @param val Value
3684 */
3685void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3686{
3687 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3688 VBOX_CHECK_ADDR(DstGCPhys);
3689 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3690 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3691#ifdef VBOX_DEBUG_PHYS
3692 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3693#endif
3694}
3695
3696
3697/**
3698 * Write guest RAM, unsigned 32-bit.
3699 *
3700 * @param DstGCPhys The destination address (guest physical).
3701 * @param val Value
3702 */
3703void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3704{
3705 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3706 VBOX_CHECK_ADDR(DstGCPhys);
3707 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3708 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3709#ifdef VBOX_DEBUG_PHYS
3710 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3711#endif
3712}
3713
3714
3715/**
3716 * Write guest RAM, unsigned 64-bit.
3717 *
3718 * @param DstGCPhys The destination address (guest physical).
3719 * @param val Value
3720 */
3721void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3722{
3723 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3724 VBOX_CHECK_ADDR(DstGCPhys);
3725 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3726 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3727#ifdef VBOX_DEBUG_PHYS
3728 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3729#endif
3730}
3731
3732#undef LOG_GROUP
3733#define LOG_GROUP LOG_GROUP_REM_MMIO
3734
3735/** Read MMIO memory. */
3736static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3737{
3738 uint32_t u32 = 0;
3739 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3740 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3741 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3742 return u32;
3743}
3744
3745/** Read MMIO memory. */
3746static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3747{
3748 uint32_t u32 = 0;
3749 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3750 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3751 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3752 return u32;
3753}
3754
3755/** Read MMIO memory. */
3756static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3757{
3758 uint32_t u32 = 0;
3759 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3760 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3761 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3762 return u32;
3763}
3764
3765/** Write to MMIO memory. */
3766static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3767{
3768 int rc;
3769 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3770 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3771 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3772}
3773
3774/** Write to MMIO memory. */
3775static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3776{
3777 int rc;
3778 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3779 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3780 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3781}
3782
3783/** Write to MMIO memory. */
3784static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3785{
3786 int rc;
3787 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3788 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3789 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3790}
3791
3792
3793#undef LOG_GROUP
3794#define LOG_GROUP LOG_GROUP_REM_HANDLER
3795
3796/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3797
3798static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3799{
3800 uint8_t u8;
3801 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3802 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3803 return u8;
3804}
3805
3806static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3807{
3808 uint16_t u16;
3809 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3810 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3811 return u16;
3812}
3813
3814static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3815{
3816 uint32_t u32;
3817 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3818 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3819 return u32;
3820}
3821
/* Handler-covered RAM write, 8-bit. Passes &u32 with a 1 byte size, i.e. the
   low byte is written only because the host is little endian.
   NOTE(review): would pick the wrong byte on a big-endian host — confirm LE-only hosts. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3827
/* Handler-covered RAM write, 16-bit. Writes the low two bytes of u32 via &u32;
   NOTE(review): relies on a little-endian host layout — confirm LE-only hosts. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3833
/* Handler-covered RAM write, 32-bit (full value, no endian concern here). */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3839
3840/* -+- disassembly -+- */
3841
3842#undef LOG_GROUP
3843#define LOG_GROUP LOG_GROUP_REM_DISAS
3844
3845
3846/**
3847 * Enables or disables singled stepped disassembly.
3848 *
3849 * @returns VBox status code.
3850 * @param pVM VM handle.
3851 * @param fEnable To enable set this flag, to disable clear it.
3852 */
3853static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3854{
3855 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3856 VM_ASSERT_EMT(pVM);
3857
3858 if (fEnable)
3859 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3860 else
3861 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3862#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
3863 cpu_single_step(&pVM->rem.s.Env, fEnable);
3864#endif
3865 return VINF_SUCCESS;
3866}
3867
3868
3869/**
3870 * Enables or disables singled stepped disassembly.
3871 *
3872 * @returns VBox status code.
3873 * @param pVM VM handle.
3874 * @param fEnable To enable set this flag, to disable clear it.
3875 */
3876REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3877{
3878 int rc;
3879
3880 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3881 if (VM_IS_EMT(pVM))
3882 return remR3DisasEnableStepping(pVM, fEnable);
3883
3884 rc = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3885 AssertRC(rc);
3886 return rc;
3887}
3888
3889
3890#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3891/**
3892 * External Debugger Command: .remstep [on|off|1|0]
3893 */
3894static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
3895{
3896 int rc;
3897
3898 if (cArgs == 0)
3899 /*
3900 * Print the current status.
3901 */
3902 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
3903 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3904 else
3905 {
3906 /*
3907 * Convert the argument and change the mode.
3908 */
3909 bool fEnable;
3910 rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3911 if (RT_SUCCESS(rc))
3912 {
3913 rc = REMR3DisasEnableStepping(pVM, fEnable);
3914 if (RT_SUCCESS(rc))
3915 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
3916 else
3917 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
3918 }
3919 else
3920 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
3921 }
3922 return rc;
3923}
3924#endif /* VBOX_WITH_DEBUGGER && !win.amd64 */
3925
3926
3927/**
3928 * Disassembles one instruction and prints it to the log.
3929 *
3930 * @returns Success indicator.
3931 * @param env Pointer to the recompiler CPU structure.
3932 * @param f32BitCode Indicates that whether or not the code should
3933 * be disassembled as 16 or 32 bit. If -1 the CS
3934 * selector will be inspected.
3935 * @param pszPrefix
3936 */
3937bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3938{
3939 PVM pVM = env->pVM;
3940 const bool fLog = LogIsEnabled();
3941 const bool fLog2 = LogIs2Enabled();
3942 int rc = VINF_SUCCESS;
3943
3944 /*
3945 * Don't bother if there ain't any log output to do.
3946 */
3947 if (!fLog && !fLog2)
3948 return true;
3949
3950 /*
3951 * Update the state so DBGF reads the correct register values.
3952 */
3953 remR3StateUpdate(pVM, env->pVCpu);
3954
3955 /*
3956 * Log registers if requested.
3957 */
3958 if (fLog2)
3959 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3960
3961 /*
3962 * Disassemble to log.
3963 */
3964 if (fLog)
3965 {
3966 PVMCPU pVCpu = VMMGetCpu(pVM);
3967 char szBuf[256];
3968 szBuf[0] = '\0';
3969 int rc = DBGFR3DisasInstrEx(pVCpu->pVMR3,
3970 pVCpu->idCpu,
3971 0, /* Sel */
3972 0, /* GCPtr */
3973 DBGF_DISAS_FLAGS_CURRENT_GUEST
3974 | DBGF_DISAS_FLAGS_DEFAULT_MODE
3975 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
3976 szBuf,
3977 sizeof(szBuf),
3978 NULL);
3979 if (RT_FAILURE(rc))
3980 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
3981 if (pszPrefix && *pszPrefix)
3982 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
3983 else
3984 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
3985 }
3986
3987 return RT_SUCCESS(rc);
3988}
3989
3990
/**
 * Disassemble recompiled code, i.e. HOST code (32-bit on x86 hosts, 64-bit
 * otherwise, see the RT_ARCH_X86 selection below), and print it to the log.
 *
 * @param   phFile  Ignored unless DEBUG_TMP_LOGGING is defined; logfile usually.
 * @param   pvCode  Pointer to the code block.
 * @param   cb      Size of the code block.
 */
void disas(FILE *phFile, void *pvCode, unsigned long cb)
{
#ifdef DEBUG_TMP_LOGGING
# define DISAS_PRINTF(x...) fprintf(phFile, x)
#else
# define DISAS_PRINTF(x...) RTLogPrintf(x)
    /* Note: this 'if' deliberately guards the whole brace block below, so in
       the non-DEBUG_TMP_LOGGING build nothing is done unless Log2 is active. */
    if (LogIs2Enabled())
#endif
    {
        unsigned off = 0;
        char szOutput[256];
        DISCPUSTATE Cpu;

        memset(&Cpu, 0, sizeof(Cpu));
#ifdef RT_ARCH_X86
        Cpu.mode = CPUMODE_32BIT;
#else
        Cpu.mode = CPUMODE_64BIT;
#endif

        DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
        while (off < cb)
        {
            uint32_t cbInstr;
            if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
                DISAS_PRINTF("%s", szOutput);
            else
            {
                /* On failure advance a single byte and retry (x86); on AMD64
                   give up entirely since DISInstr can't decode 64-bit code yet. */
                DISAS_PRINTF("disas error\n");
                cbInstr = 1;
#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporting 64-bit code. */
                break;
#endif
            }
            off += cbInstr;
        }
    }

#undef DISAS_PRINTF
}
4038
4039
4040/**
4041 * Disassemble guest code.
4042 *
4043 * @param phFileIgnored Ignored, logfile usually.
4044 * @param uCode The guest address of the code to disassemble. (flat?)
4045 * @param cb Number of bytes to disassemble.
4046 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
4047 */
4048void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
4049{
4050#ifdef DEBUG_TMP_LOGGING
4051# define DISAS_PRINTF(x...) fprintf(phFile, x)
4052#else
4053# define DISAS_PRINTF(x...) RTLogPrintf(x)
4054 if (LogIs2Enabled())
4055#endif
4056 {
4057 PVM pVM = cpu_single_env->pVM;
4058 PVMCPU pVCpu = cpu_single_env->pVCpu;
4059 RTSEL cs;
4060 RTGCUINTPTR eip;
4061
4062 Assert(pVCpu);
4063
4064 /*
4065 * Update the state so DBGF reads the correct register values (flags).
4066 */
4067 remR3StateUpdate(pVM, pVCpu);
4068
4069 /*
4070 * Do the disassembling.
4071 */
4072 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4073 cs = cpu_single_env->segs[R_CS].selector;
4074 eip = uCode - cpu_single_env->segs[R_CS].base;
4075 for (;;)
4076 {
4077 char szBuf[256];
4078 uint32_t cbInstr;
4079 int rc = DBGFR3DisasInstrEx(pVM,
4080 pVCpu->idCpu,
4081 cs,
4082 eip,
4083 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4084 szBuf, sizeof(szBuf),
4085 &cbInstr);
4086 if (RT_SUCCESS(rc))
4087 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
4088 else
4089 {
4090 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4091 cbInstr = 1;
4092 }
4093
4094 /* next */
4095 if (cb <= cbInstr)
4096 break;
4097 cb -= cbInstr;
4098 uCode += cbInstr;
4099 eip += cbInstr;
4100 }
4101 }
4102#undef DISAS_PRINTF
4103}
4104
4105
/**
 * Looks up a guest symbol.
 *
 * @returns Pointer to symbol name. This is a static buffer, so the result is
 *          only valid until the next call and is not thread safe; a trailing
 *          newline is embedded in the returned string.
 * @param   orig_addr   The address in question.
 */
const char *lookup_symbol(target_ulong orig_addr)
{
    PVM pVM = cpu_single_env->pVM;
    RTGCINTPTR off = 0;
    RTDBGSYMBOL Sym;
    DBGFADDRESS Addr;

    int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
    if (RT_SUCCESS(rc))
    {
        static char szSym[sizeof(Sym.szName) + 48];
        /* NOTE(review): '%x' takes an unsigned int while 'off' is RTGCINTPTR
           (possibly 64-bit); large offsets may be misformatted on such builds —
           confirm and consider a 64-bit format specifier. */
        if (!off)
            RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
        else if (off > 0)
            RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
        else
            RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
        return szSym;
    }
    return "<N/A>";
}
4133
4134
4135#undef LOG_GROUP
4136#define LOG_GROUP LOG_GROUP_REM
4137
4138
4139/* -+- FF notifications -+- */
4140
4141
/**
 * Notification about a pending interrupt.
 *
 * Stores the interrupt vector for cpu_get_pic_interrupt() to pick up; only
 * one interrupt may be pending at a time (asserted below).
 *
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle.
 * @param   u8Interrupt     Interrupt
 * @thread  The emulation thread.
 */
REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
{
    Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
    pVM->rem.s.u32PendingInterrupt = u8Interrupt;
}
4155
/**
 * Notification about a pending interrupt.
 *
 * @returns Pending interrupt or REM_NO_PENDING_IRQ
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle.
 * @thread  The emulation thread.
 */
REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
{
    /* Read-only query; the pending slot is cleared by cpu_get_pic_interrupt(). */
    return pVM->rem.s.u32PendingInterrupt;
}
4168
4169/**
4170 * Notification about the interrupt FF being set.
4171 *
4172 * @param pVM VM Handle.
4173 * @param pVCpu VMCPU Handle.
4174 * @thread The emulation thread.
4175 */
4176REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
4177{
4178 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
4179 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
4180 if (pVM->rem.s.fInREM)
4181 {
4182 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4183 CPU_INTERRUPT_EXTERNAL_HARD);
4184 }
4185}
4186
4187
/**
 * Notification about the interrupt FF being cleared.
 *
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
{
    LogFlow(("REMR3NotifyInterruptClear:\n"));
    /* NOTE(review): clears CPU_INTERRUPT_HARD while the set-notification ORs in
       CPU_INTERRUPT_EXTERNAL_HARD — presumably intentional (cpu_reset_interrupt
       handles the mapping); confirm against the recompiler core. */
    if (pVM->rem.s.fInREM)
        cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
}
4201
4202
4203/**
4204 * Notification about pending timer(s).
4205 *
4206 * @param pVM VM Handle.
4207 * @param pVCpuDst The target cpu for this notification.
4208 * TM will not broadcast pending timer events, but use
4209 * a dedicated EMT for them. So, only interrupt REM
4210 * execution if the given CPU is executing in REM.
4211 * @thread Any.
4212 */
4213REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
4214{
4215#ifndef DEBUG_bird
4216 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
4217#endif
4218 if (pVM->rem.s.fInREM)
4219 {
4220 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
4221 {
4222 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
4223 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
4224 CPU_INTERRUPT_EXTERNAL_TIMER);
4225 }
4226 else
4227 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
4228 }
4229 else
4230 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
4231}
4232
4233
4234/**
4235 * Notification about pending DMA transfers.
4236 *
4237 * @param pVM VM Handle.
4238 * @thread Any.
4239 */
4240REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4241{
4242 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4243 if (pVM->rem.s.fInREM)
4244 {
4245 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4246 CPU_INTERRUPT_EXTERNAL_DMA);
4247 }
4248}
4249
4250
4251/**
4252 * Notification about pending timer(s).
4253 *
4254 * @param pVM VM Handle.
4255 * @thread Any.
4256 */
4257REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4258{
4259 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4260 if (pVM->rem.s.fInREM)
4261 {
4262 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4263 CPU_INTERRUPT_EXTERNAL_EXIT);
4264 }
4265}
4266
4267
4268/**
4269 * Notification about pending FF set by an external thread.
4270 *
4271 * @param pVM VM handle.
4272 * @thread Any.
4273 */
4274REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4275{
4276 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4277 if (pVM->rem.s.fInREM)
4278 {
4279 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4280 CPU_INTERRUPT_EXTERNAL_EXIT);
4281 }
4282}
4283
4284
4285#ifdef VBOX_WITH_STATISTICS
4286void remR3ProfileStart(int statcode)
4287{
4288 STAMPROFILEADV *pStat;
4289 switch(statcode)
4290 {
4291 case STATS_EMULATE_SINGLE_INSTR:
4292 pStat = &gStatExecuteSingleInstr;
4293 break;
4294 case STATS_QEMU_COMPILATION:
4295 pStat = &gStatCompilationQEmu;
4296 break;
4297 case STATS_QEMU_RUN_EMULATED_CODE:
4298 pStat = &gStatRunCodeQEmu;
4299 break;
4300 case STATS_QEMU_TOTAL:
4301 pStat = &gStatTotalTimeQEmu;
4302 break;
4303 case STATS_QEMU_RUN_TIMERS:
4304 pStat = &gStatTimers;
4305 break;
4306 case STATS_TLB_LOOKUP:
4307 pStat= &gStatTBLookup;
4308 break;
4309 case STATS_IRQ_HANDLING:
4310 pStat= &gStatIRQ;
4311 break;
4312 case STATS_RAW_CHECK:
4313 pStat = &gStatRawCheck;
4314 break;
4315
4316 default:
4317 AssertMsgFailed(("unknown stat %d\n", statcode));
4318 return;
4319 }
4320 STAM_PROFILE_ADV_START(pStat, a);
4321}
4322
4323
4324void remR3ProfileStop(int statcode)
4325{
4326 STAMPROFILEADV *pStat;
4327 switch(statcode)
4328 {
4329 case STATS_EMULATE_SINGLE_INSTR:
4330 pStat = &gStatExecuteSingleInstr;
4331 break;
4332 case STATS_QEMU_COMPILATION:
4333 pStat = &gStatCompilationQEmu;
4334 break;
4335 case STATS_QEMU_RUN_EMULATED_CODE:
4336 pStat = &gStatRunCodeQEmu;
4337 break;
4338 case STATS_QEMU_TOTAL:
4339 pStat = &gStatTotalTimeQEmu;
4340 break;
4341 case STATS_QEMU_RUN_TIMERS:
4342 pStat = &gStatTimers;
4343 break;
4344 case STATS_TLB_LOOKUP:
4345 pStat= &gStatTBLookup;
4346 break;
4347 case STATS_IRQ_HANDLING:
4348 pStat= &gStatIRQ;
4349 break;
4350 case STATS_RAW_CHECK:
4351 pStat = &gStatRawCheck;
4352 break;
4353 default:
4354 AssertMsgFailed(("unknown stat %d\n", statcode));
4355 return;
4356 }
4357 STAM_PROFILE_ADV_STOP(pStat, a);
4358}
4359#endif
4360
/**
 * Raise an RC, force rem exit.
 *
 * Stashes the status code and interrupts the recompiler so the execution
 * loop stops and the caller can pick up 'rc'.
 *
 * @param   pVM     VM handle.
 * @param   rc      The rc.
 */
void remR3RaiseRC(PVM pVM, int rc)
{
    Log(("remR3RaiseRC: rc=%Rrc\n", rc));
    Assert(pVM->rem.s.fInREM);
    VM_ASSERT_EMT(pVM);
    pVM->rem.s.rc = rc;                               /* stash the status for the REM exit path */
    cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC); /* force the execution loop to exit */
}
4375
4376
4377/* -+- timers -+- */
4378
/**
 * Gets the guest TSC for the recompiler (RDTSC emulation) via TM.
 *
 * @returns The virtual CPU's current TSC value.
 * @param   env     The recompiler CPU state.
 */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVCpu);
}
4384
4385
4386/* -+- interrupts -+- */
4387
/**
 * Asserts the FPU error line by raising ISA IRQ 13 via PDM.
 *
 * @param   env     The recompiler CPU state.
 */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4393
/**
 * Gets the next pending interrupt vector for the recompiler.
 *
 * Prefers a vector previously stashed via REMR3NotifyPendingInterrupt (raw-mode
 * fallback, see the kludge note below) over querying PDM directly.
 *
 * @returns The interrupt vector, or -1 if none could be obtained.
 * @param   env     The recompiler CPU state.
 */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ; /* consume the stashed vector */
    }
    else
        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
             u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
    if (RT_SUCCESS(rc))
    {
        /* Keep the hard-interrupt request up while more APIC/PIC interrupts are pending. */
        if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4427
4428
4429/* -+- local apic -+- */
4430
4431#if 0 /* CPUMSetGuestMsr does this now. */
4432void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4433{
4434 int rc = PDMApicSetBase(env->pVM, val);
4435 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4436}
4437#endif
4438
4439uint64_t cpu_get_apic_base(CPUX86State *env)
4440{
4441 uint64_t u64;
4442 int rc = PDMApicGetBase(env->pVM, &u64);
4443 if (RT_SUCCESS(rc))
4444 {
4445 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4446 return u64;
4447 }
4448 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4449 return 0;
4450}
4451
/**
 * Sets the task priority (CR8) via PDM for the recompiler.
 *
 * @param   env     The recompiler CPU state.
 * @param   val     The new CR8 value (low 4 bits).
 */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4457
4458uint8_t cpu_get_apic_tpr(CPUX86State *env)
4459{
4460 uint8_t u8;
4461 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4462 if (RT_SUCCESS(rc))
4463 {
4464 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4465 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4466 }
4467 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4468 return 0;
4469}
4470
4471/**
4472 * Read an MSR.
4473 *
4474 * @retval 0 success.
4475 * @retval -1 failure, raise \#GP(0).
4476 * @param env The cpu state.
4477 * @param idMsr The MSR to read.
4478 * @param puValue Where to return the value.
4479 */
4480int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4481{
4482 Assert(env->pVCpu);
4483 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4484}
4485
/**
 * Write to an MSR.
 *
 * @retval  0 success.
 * @retval  -1 failure, raise \#GP(0).
 * @param   env     The cpu state.
 * @param   idMsr   The MSR to write.
 * @param   uValue  The value to write.
 */
int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
{
    Assert(env->pVCpu);
    return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
}
4500
4501/* -+- I/O Ports -+- */
4502
4503#undef LOG_GROUP
4504#define LOG_GROUP LOG_GROUP_REM_IOPORT
4505
4506void cpu_outb(CPUState *env, int addr, int val)
4507{
4508 int rc;
4509
4510 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4511 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4512
4513 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4514 if (RT_LIKELY(rc == VINF_SUCCESS))
4515 return;
4516 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4517 {
4518 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4519 remR3RaiseRC(env->pVM, rc);
4520 return;
4521 }
4522 remAbort(rc, __FUNCTION__);
4523}
4524
4525void cpu_outw(CPUState *env, int addr, int val)
4526{
4527 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4528 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4529 if (RT_LIKELY(rc == VINF_SUCCESS))
4530 return;
4531 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4532 {
4533 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4534 remR3RaiseRC(env->pVM, rc);
4535 return;
4536 }
4537 remAbort(rc, __FUNCTION__);
4538}
4539
4540void cpu_outl(CPUState *env, int addr, int val)
4541{
4542 int rc;
4543 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4544 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4545 if (RT_LIKELY(rc == VINF_SUCCESS))
4546 return;
4547 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4548 {
4549 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4550 remR3RaiseRC(env->pVM, rc);
4551 return;
4552 }
4553 remAbort(rc, __FUNCTION__);
4554}
4555
4556int cpu_inb(CPUState *env, int addr)
4557{
4558 uint32_t u32 = 0;
4559 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4560 if (RT_LIKELY(rc == VINF_SUCCESS))
4561 {
4562 if (/*addr != 0x61 && */addr != 0x71)
4563 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4564 return (int)u32;
4565 }
4566 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4567 {
4568 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4569 remR3RaiseRC(env->pVM, rc);
4570 return (int)u32;
4571 }
4572 remAbort(rc, __FUNCTION__);
4573 return 0xff;
4574}
4575
4576int cpu_inw(CPUState *env, int addr)
4577{
4578 uint32_t u32 = 0;
4579 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4580 if (RT_LIKELY(rc == VINF_SUCCESS))
4581 {
4582 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4583 return (int)u32;
4584 }
4585 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4586 {
4587 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4588 remR3RaiseRC(env->pVM, rc);
4589 return (int)u32;
4590 }
4591 remAbort(rc, __FUNCTION__);
4592 return 0xffff;
4593}
4594
4595int cpu_inl(CPUState *env, int addr)
4596{
4597 uint32_t u32 = 0;
4598 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4599 if (RT_LIKELY(rc == VINF_SUCCESS))
4600 {
4601//if (addr==0x01f0 && u32 == 0x6b6d)
4602// loglevel = ~0;
4603 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4604 return (int)u32;
4605 }
4606 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4607 {
4608 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4609 remR3RaiseRC(env->pVM, rc);
4610 return (int)u32;
4611 }
4612 remAbort(rc, __FUNCTION__);
4613 return 0xffffffff;
4614}
4615
4616#undef LOG_GROUP
4617#define LOG_GROUP LOG_GROUP_REM
4618
4619
4620/* -+- helpers and misc other interfaces -+- */
4621
/**
 * Perform the CPUID instruction.
 *
 * ASMCpuId cannot be invoked from some source files where this is used because of global
 * register allocations.
 *
 * @param   env         Pointer to the recompiler CPU structure.
 * @param   uOperator   CPUID operation (eax).
 * @param   pvEAX       Where to store eax.
 * @param   pvEBX       Where to store ebx.
 * @param   pvECX       Where to store ecx.
 * @param   pvEDX       Where to store edx.
 */
void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
{
    /* Forward to CPUM so the guest sees the configured CPUID values. */
    CPUMGetGuestCpuId(env->pVCpu, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
}
4639
4640
4641#if 0 /* not used */
4642/**
4643 * Interface for qemu hardware to report back fatal errors.
4644 */
4645void hw_error(const char *pszFormat, ...)
4646{
4647 /*
4648 * Bitch about it.
4649 */
4650 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4651 * this in my Odin32 tree at home! */
4652 va_list args;
4653 va_start(args, pszFormat);
4654 RTLogPrintf("fatal error in virtual hardware:");
4655 RTLogPrintfV(pszFormat, args);
4656 va_end(args);
4657 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4658
4659 /*
4660 * If we're in REM context we'll sync back the state before 'jumping' to
4661 * the EMs failure handling.
4662 */
4663 PVM pVM = cpu_single_env->pVM;
4664 if (pVM->rem.s.fInREM)
4665 REMR3StateBack(pVM);
4666 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4667 AssertMsgFailed(("EMR3FatalError returned!\n"));
4668}
4669#endif
4670
/**
 * Interface for the qemu cpu to report unhandled situation
 * raising a fatal VM error.
 *
 * Formats the message, logs it, syncs the recompiler state back (when inside
 * REM) and hands the fatal error to EM. Does not return.
 */
void cpu_abort(CPUState *env, const char *pszFormat, ...)
{
    va_list va;
    PVM pVM;
    PVMCPU pVCpu;
    char szMsg[256];

    /*
     * Bitch about it.
     */
    RTLogFlags(NULL, "nodisabled nobuffered");
    RTLogFlush(NULL);

    va_start(va, pszFormat);
#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
    /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
    /* The callers use the GCC varargs ABI while IPRT here is built with MSC, so
       the va_list cannot be forwarded directly; pull up to six pointer-sized
       arguments manually (one per '%' found in the format string). */
    unsigned cArgs = 0;
    uintptr_t auArgs[6] = {0,0,0,0,0,0};
    const char *psz = strchr(pszFormat, '%');
    while (psz && cArgs < 6)
    {
        auArgs[cArgs++] = va_arg(va, uintptr_t);
        psz = strchr(psz + 1, '%');
    }
    switch (cArgs)
    {
        case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
        case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
        case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
        case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
        case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
        case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
        default:
        case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
    }
#else
    RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
#endif
    va_end(va);

    RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
    RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);
    EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4731
4732
/**
 * Aborts the VM.
 *
 * Logs the failure, syncs the recompiler state back (when inside REM) and
 * hands the fatal error to EM. Does not return.
 *
 * @param   rc      VBox error code.
 * @param   pszTip  Hint about why/when this happened.
 */
void remAbort(int rc, const char *pszTip)
{
    PVM pVM;
    PVMCPU pVCpu;

    /*
     * Bitch about it.
     */
    RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
    AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));

    /*
     * Jump back to where we entered the recompiler.
     */
    pVM = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);

    EMR3FatalError(pVCpu, rc);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4763
4764
4765/**
4766 * Dumps a linux system call.
4767 * @param pVCpu VMCPU handle.
4768 */
4769void remR3DumpLnxSyscall(PVMCPU pVCpu)
4770{
4771 static const char *apsz[] =
4772 {
4773 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4774 "sys_exit",
4775 "sys_fork",
4776 "sys_read",
4777 "sys_write",
4778 "sys_open", /* 5 */
4779 "sys_close",
4780 "sys_waitpid",
4781 "sys_creat",
4782 "sys_link",
4783 "sys_unlink", /* 10 */
4784 "sys_execve",
4785 "sys_chdir",
4786 "sys_time",
4787 "sys_mknod",
4788 "sys_chmod", /* 15 */
4789 "sys_lchown16",
4790 "sys_ni_syscall", /* old break syscall holder */
4791 "sys_stat",
4792 "sys_lseek",
4793 "sys_getpid", /* 20 */
4794 "sys_mount",
4795 "sys_oldumount",
4796 "sys_setuid16",
4797 "sys_getuid16",
4798 "sys_stime", /* 25 */
4799 "sys_ptrace",
4800 "sys_alarm",
4801 "sys_fstat",
4802 "sys_pause",
4803 "sys_utime", /* 30 */
4804 "sys_ni_syscall", /* old stty syscall holder */
4805 "sys_ni_syscall", /* old gtty syscall holder */
4806 "sys_access",
4807 "sys_nice",
4808 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4809 "sys_sync",
4810 "sys_kill",
4811 "sys_rename",
4812 "sys_mkdir",
4813 "sys_rmdir", /* 40 */
4814 "sys_dup",
4815 "sys_pipe",
4816 "sys_times",
4817 "sys_ni_syscall", /* old prof syscall holder */
4818 "sys_brk", /* 45 */
4819 "sys_setgid16",
4820 "sys_getgid16",
4821 "sys_signal",
4822 "sys_geteuid16",
4823 "sys_getegid16", /* 50 */
4824 "sys_acct",
4825 "sys_umount", /* recycled never used phys() */
4826 "sys_ni_syscall", /* old lock syscall holder */
4827 "sys_ioctl",
4828 "sys_fcntl", /* 55 */
4829 "sys_ni_syscall", /* old mpx syscall holder */
4830 "sys_setpgid",
4831 "sys_ni_syscall", /* old ulimit syscall holder */
4832 "sys_olduname",
4833 "sys_umask", /* 60 */
4834 "sys_chroot",
4835 "sys_ustat",
4836 "sys_dup2",
4837 "sys_getppid",
4838 "sys_getpgrp", /* 65 */
4839 "sys_setsid",
4840 "sys_sigaction",
4841 "sys_sgetmask",
4842 "sys_ssetmask",
4843 "sys_setreuid16", /* 70 */
4844 "sys_setregid16",
4845 "sys_sigsuspend",
4846 "sys_sigpending",
4847 "sys_sethostname",
4848 "sys_setrlimit", /* 75 */
4849 "sys_old_getrlimit",
4850 "sys_getrusage",
4851 "sys_gettimeofday",
4852 "sys_settimeofday",
4853 "sys_getgroups16", /* 80 */
4854 "sys_setgroups16",
4855 "old_select",
4856 "sys_symlink",
4857 "sys_lstat",
4858 "sys_readlink", /* 85 */
4859 "sys_uselib",
4860 "sys_swapon",
4861 "sys_reboot",
4862 "old_readdir",
4863 "old_mmap", /* 90 */
4864 "sys_munmap",
4865 "sys_truncate",
4866 "sys_ftruncate",
4867 "sys_fchmod",
4868 "sys_fchown16", /* 95 */
4869 "sys_getpriority",
4870 "sys_setpriority",
4871 "sys_ni_syscall", /* old profil syscall holder */
4872 "sys_statfs",
4873 "sys_fstatfs", /* 100 */
4874 "sys_ioperm",
4875 "sys_socketcall",
4876 "sys_syslog",
4877 "sys_setitimer",
4878 "sys_getitimer", /* 105 */
4879 "sys_newstat",
4880 "sys_newlstat",
4881 "sys_newfstat",
4882 "sys_uname",
4883 "sys_iopl", /* 110 */
4884 "sys_vhangup",
4885 "sys_ni_syscall", /* old "idle" system call */
4886 "sys_vm86old",
4887 "sys_wait4",
4888 "sys_swapoff", /* 115 */
4889 "sys_sysinfo",
4890 "sys_ipc",
4891 "sys_fsync",
4892 "sys_sigreturn",
4893 "sys_clone", /* 120 */
4894 "sys_setdomainname",
4895 "sys_newuname",
4896 "sys_modify_ldt",
4897 "sys_adjtimex",
4898 "sys_mprotect", /* 125 */
4899 "sys_sigprocmask",
4900 "sys_ni_syscall", /* old "create_module" */
4901 "sys_init_module",
4902 "sys_delete_module",
4903 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4904 "sys_quotactl",
4905 "sys_getpgid",
4906 "sys_fchdir",
4907 "sys_bdflush",
4908 "sys_sysfs", /* 135 */
4909 "sys_personality",
4910 "sys_ni_syscall", /* reserved for afs_syscall */
4911 "sys_setfsuid16",
4912 "sys_setfsgid16",
4913 "sys_llseek", /* 140 */
4914 "sys_getdents",
4915 "sys_select",
4916 "sys_flock",
4917 "sys_msync",
4918 "sys_readv", /* 145 */
4919 "sys_writev",
4920 "sys_getsid",
4921 "sys_fdatasync",
4922 "sys_sysctl",
4923 "sys_mlock", /* 150 */
4924 "sys_munlock",
4925 "sys_mlockall",
4926 "sys_munlockall",
4927 "sys_sched_setparam",
4928 "sys_sched_getparam", /* 155 */
4929 "sys_sched_setscheduler",
4930 "sys_sched_getscheduler",
4931 "sys_sched_yield",
4932 "sys_sched_get_priority_max",
4933 "sys_sched_get_priority_min", /* 160 */
4934 "sys_sched_rr_get_interval",
4935 "sys_nanosleep",
4936 "sys_mremap",
4937 "sys_setresuid16",
4938 "sys_getresuid16", /* 165 */
4939 "sys_vm86",
4940 "sys_ni_syscall", /* Old sys_query_module */
4941 "sys_poll",
4942 "sys_nfsservctl",
4943 "sys_setresgid16", /* 170 */
4944 "sys_getresgid16",
4945 "sys_prctl",
4946 "sys_rt_sigreturn",
4947 "sys_rt_sigaction",
4948 "sys_rt_sigprocmask", /* 175 */
4949 "sys_rt_sigpending",
4950 "sys_rt_sigtimedwait",
4951 "sys_rt_sigqueueinfo",
4952 "sys_rt_sigsuspend",
4953 "sys_pread64", /* 180 */
4954 "sys_pwrite64",
4955 "sys_chown16",
4956 "sys_getcwd",
4957 "sys_capget",
4958 "sys_capset", /* 185 */
4959 "sys_sigaltstack",
4960 "sys_sendfile",
4961 "sys_ni_syscall", /* reserved for streams1 */
4962 "sys_ni_syscall", /* reserved for streams2 */
4963 "sys_vfork", /* 190 */
4964 "sys_getrlimit",
4965 "sys_mmap2",
4966 "sys_truncate64",
4967 "sys_ftruncate64",
4968 "sys_stat64", /* 195 */
4969 "sys_lstat64",
4970 "sys_fstat64",
4971 "sys_lchown",
4972 "sys_getuid",
4973 "sys_getgid", /* 200 */
4974 "sys_geteuid",
4975 "sys_getegid",
4976 "sys_setreuid",
4977 "sys_setregid",
4978 "sys_getgroups", /* 205 */
4979 "sys_setgroups",
4980 "sys_fchown",
4981 "sys_setresuid",
4982 "sys_getresuid",
4983 "sys_setresgid", /* 210 */
4984 "sys_getresgid",
4985 "sys_chown",
4986 "sys_setuid",
4987 "sys_setgid",
4988 "sys_setfsuid", /* 215 */
4989 "sys_setfsgid",
4990 "sys_pivot_root",
4991 "sys_mincore",
4992 "sys_madvise",
4993 "sys_getdents64", /* 220 */
4994 "sys_fcntl64",
4995 "sys_ni_syscall", /* reserved for TUX */
4996 "sys_ni_syscall",
4997 "sys_gettid",
4998 "sys_readahead", /* 225 */
4999 "sys_setxattr",
5000 "sys_lsetxattr",
5001 "sys_fsetxattr",
5002 "sys_getxattr",
5003 "sys_lgetxattr", /* 230 */
5004 "sys_fgetxattr",
5005 "sys_listxattr",
5006 "sys_llistxattr",
5007 "sys_flistxattr",
5008 "sys_removexattr", /* 235 */
5009 "sys_lremovexattr",
5010 "sys_fremovexattr",
5011 "sys_tkill",
5012 "sys_sendfile64",
5013 "sys_futex", /* 240 */
5014 "sys_sched_setaffinity",
5015 "sys_sched_getaffinity",
5016 "sys_set_thread_area",
5017 "sys_get_thread_area",
5018 "sys_io_setup", /* 245 */
5019 "sys_io_destroy",
5020 "sys_io_getevents",
5021 "sys_io_submit",
5022 "sys_io_cancel",
5023 "sys_fadvise64", /* 250 */
5024 "sys_ni_syscall",
5025 "sys_exit_group",
5026 "sys_lookup_dcookie",
5027 "sys_epoll_create",
5028 "sys_epoll_ctl", /* 255 */
5029 "sys_epoll_wait",
5030 "sys_remap_file_pages",
5031 "sys_set_tid_address",
5032 "sys_timer_create",
5033 "sys_timer_settime", /* 260 */
5034 "sys_timer_gettime",
5035 "sys_timer_getoverrun",
5036 "sys_timer_delete",
5037 "sys_clock_settime",
5038 "sys_clock_gettime", /* 265 */
5039 "sys_clock_getres",
5040 "sys_clock_nanosleep",
5041 "sys_statfs64",
5042 "sys_fstatfs64",
5043 "sys_tgkill", /* 270 */
5044 "sys_utimes",
5045 "sys_fadvise64_64",
5046 "sys_ni_syscall" /* sys_vserver */
5047 };
5048
5049 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
5050 switch (uEAX)
5051 {
5052 default:
5053 if (uEAX < RT_ELEMENTS(apsz))
5054 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
5055 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
5056 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
5057 else
5058 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
5059 break;
5060
5061 }
5062}
5063
5064
5065/**
5066 * Dumps an OpenBSD system call.
5067 * @param pVCpu VMCPU handle.
5068 */
5069void remR3DumpOBsdSyscall(PVMCPU pVCpu)
5070{
5071 static const char *apsz[] =
5072 {
5073 "SYS_syscall", //0
5074 "SYS_exit", //1
5075 "SYS_fork", //2
5076 "SYS_read", //3
5077 "SYS_write", //4
5078 "SYS_open", //5
5079 "SYS_close", //6
5080 "SYS_wait4", //7
5081 "SYS_8",
5082 "SYS_link", //9
5083 "SYS_unlink", //10
5084 "SYS_11",
5085 "SYS_chdir", //12
5086 "SYS_fchdir", //13
5087 "SYS_mknod", //14
5088 "SYS_chmod", //15
5089 "SYS_chown", //16
5090 "SYS_break", //17
5091 "SYS_18",
5092 "SYS_19",
5093 "SYS_getpid", //20
5094 "SYS_mount", //21
5095 "SYS_unmount", //22
5096 "SYS_setuid", //23
5097 "SYS_getuid", //24
5098 "SYS_geteuid", //25
5099 "SYS_ptrace", //26
5100 "SYS_recvmsg", //27
5101 "SYS_sendmsg", //28
5102 "SYS_recvfrom", //29
5103 "SYS_accept", //30
5104 "SYS_getpeername", //31
5105 "SYS_getsockname", //32
5106 "SYS_access", //33
5107 "SYS_chflags", //34
5108 "SYS_fchflags", //35
5109 "SYS_sync", //36
5110 "SYS_kill", //37
5111 "SYS_38",
5112 "SYS_getppid", //39
5113 "SYS_40",
5114 "SYS_dup", //41
5115 "SYS_opipe", //42
5116 "SYS_getegid", //43
5117 "SYS_profil", //44
5118 "SYS_ktrace", //45
5119 "SYS_sigaction", //46
5120 "SYS_getgid", //47
5121 "SYS_sigprocmask", //48
5122 "SYS_getlogin", //49
5123 "SYS_setlogin", //50
5124 "SYS_acct", //51
5125 "SYS_sigpending", //52
5126 "SYS_osigaltstack", //53
5127 "SYS_ioctl", //54
5128 "SYS_reboot", //55
5129 "SYS_revoke", //56
5130 "SYS_symlink", //57
5131 "SYS_readlink", //58
5132 "SYS_execve", //59
5133 "SYS_umask", //60
5134 "SYS_chroot", //61
5135 "SYS_62",
5136 "SYS_63",
5137 "SYS_64",
5138 "SYS_65",
5139 "SYS_vfork", //66
5140 "SYS_67",
5141 "SYS_68",
5142 "SYS_sbrk", //69
5143 "SYS_sstk", //70
5144 "SYS_61",
5145 "SYS_vadvise", //72
5146 "SYS_munmap", //73
5147 "SYS_mprotect", //74
5148 "SYS_madvise", //75
5149 "SYS_76",
5150 "SYS_77",
5151 "SYS_mincore", //78
5152 "SYS_getgroups", //79
5153 "SYS_setgroups", //80
5154 "SYS_getpgrp", //81
5155 "SYS_setpgid", //82
5156 "SYS_setitimer", //83
5157 "SYS_84",
5158 "SYS_85",
5159 "SYS_getitimer", //86
5160 "SYS_87",
5161 "SYS_88",
5162 "SYS_89",
5163 "SYS_dup2", //90
5164 "SYS_91",
5165 "SYS_fcntl", //92
5166 "SYS_select", //93
5167 "SYS_94",
5168 "SYS_fsync", //95
5169 "SYS_setpriority", //96
5170 "SYS_socket", //97
5171 "SYS_connect", //98
5172 "SYS_99",
5173 "SYS_getpriority", //100
5174 "SYS_101",
5175 "SYS_102",
5176 "SYS_sigreturn", //103
5177 "SYS_bind", //104
5178 "SYS_setsockopt", //105
5179 "SYS_listen", //106
5180 "SYS_107",
5181 "SYS_108",
5182 "SYS_109",
5183 "SYS_110",
5184 "SYS_sigsuspend", //111
5185 "SYS_112",
5186 "SYS_113",
5187 "SYS_114",
5188 "SYS_115",
5189 "SYS_gettimeofday", //116
5190 "SYS_getrusage", //117
5191 "SYS_getsockopt", //118
5192 "SYS_119",
5193 "SYS_readv", //120
5194 "SYS_writev", //121
5195 "SYS_settimeofday", //122
5196 "SYS_fchown", //123
5197 "SYS_fchmod", //124
5198 "SYS_125",
5199 "SYS_setreuid", //126
5200 "SYS_setregid", //127
5201 "SYS_rename", //128
5202 "SYS_129",
5203 "SYS_130",
5204 "SYS_flock", //131
5205 "SYS_mkfifo", //132
5206 "SYS_sendto", //133
5207 "SYS_shutdown", //134
5208 "SYS_socketpair", //135
5209 "SYS_mkdir", //136
5210 "SYS_rmdir", //137
5211 "SYS_utimes", //138
5212 "SYS_139",
5213 "SYS_adjtime", //140
5214 "SYS_141",
5215 "SYS_142",
5216 "SYS_143",
5217 "SYS_144",
5218 "SYS_145",
5219 "SYS_146",
5220 "SYS_setsid", //147
5221 "SYS_quotactl", //148
5222 "SYS_149",
5223 "SYS_150",
5224 "SYS_151",
5225 "SYS_152",
5226 "SYS_153",
5227 "SYS_154",
5228 "SYS_nfssvc", //155
5229 "SYS_156",
5230 "SYS_157",
5231 "SYS_158",
5232 "SYS_159",
5233 "SYS_160",
5234 "SYS_getfh", //161
5235 "SYS_162",
5236 "SYS_163",
5237 "SYS_164",
5238 "SYS_sysarch", //165
5239 "SYS_166",
5240 "SYS_167",
5241 "SYS_168",
5242 "SYS_169",
5243 "SYS_170",
5244 "SYS_171",
5245 "SYS_172",
5246 "SYS_pread", //173
5247 "SYS_pwrite", //174
5248 "SYS_175",
5249 "SYS_176",
5250 "SYS_177",
5251 "SYS_178",
5252 "SYS_179",
5253 "SYS_180",
5254 "SYS_setgid", //181
5255 "SYS_setegid", //182
5256 "SYS_seteuid", //183
5257 "SYS_lfs_bmapv", //184
5258 "SYS_lfs_markv", //185
5259 "SYS_lfs_segclean", //186
5260 "SYS_lfs_segwait", //187
5261 "SYS_188",
5262 "SYS_189",
5263 "SYS_190",
5264 "SYS_pathconf", //191
5265 "SYS_fpathconf", //192
5266 "SYS_swapctl", //193
5267 "SYS_getrlimit", //194
5268 "SYS_setrlimit", //195
5269 "SYS_getdirentries", //196
5270 "SYS_mmap", //197
5271 "SYS___syscall", //198
5272 "SYS_lseek", //199
5273 "SYS_truncate", //200
5274 "SYS_ftruncate", //201
5275 "SYS___sysctl", //202
5276 "SYS_mlock", //203
5277 "SYS_munlock", //204
5278 "SYS_205",
5279 "SYS_futimes", //206
5280 "SYS_getpgid", //207
5281 "SYS_xfspioctl", //208
5282 "SYS_209",
5283 "SYS_210",
5284 "SYS_211",
5285 "SYS_212",
5286 "SYS_213",
5287 "SYS_214",
5288 "SYS_215",
5289 "SYS_216",
5290 "SYS_217",
5291 "SYS_218",
5292 "SYS_219",
5293 "SYS_220",
5294 "SYS_semget", //221
5295 "SYS_222",
5296 "SYS_223",
5297 "SYS_224",
5298 "SYS_msgget", //225
5299 "SYS_msgsnd", //226
5300 "SYS_msgrcv", //227
5301 "SYS_shmat", //228
5302 "SYS_229",
5303 "SYS_shmdt", //230
5304 "SYS_231",
5305 "SYS_clock_gettime", //232
5306 "SYS_clock_settime", //233
5307 "SYS_clock_getres", //234
5308 "SYS_235",
5309 "SYS_236",
5310 "SYS_237",
5311 "SYS_238",
5312 "SYS_239",
5313 "SYS_nanosleep", //240
5314 "SYS_241",
5315 "SYS_242",
5316 "SYS_243",
5317 "SYS_244",
5318 "SYS_245",
5319 "SYS_246",
5320 "SYS_247",
5321 "SYS_248",
5322 "SYS_249",
5323 "SYS_minherit", //250
5324 "SYS_rfork", //251
5325 "SYS_poll", //252
5326 "SYS_issetugid", //253
5327 "SYS_lchown", //254
5328 "SYS_getsid", //255
5329 "SYS_msync", //256
5330 "SYS_257",
5331 "SYS_258",
5332 "SYS_259",
5333 "SYS_getfsstat", //260
5334 "SYS_statfs", //261
5335 "SYS_fstatfs", //262
5336 "SYS_pipe", //263
5337 "SYS_fhopen", //264
5338 "SYS_265",
5339 "SYS_fhstatfs", //266
5340 "SYS_preadv", //267
5341 "SYS_pwritev", //268
5342 "SYS_kqueue", //269
5343 "SYS_kevent", //270
5344 "SYS_mlockall", //271
5345 "SYS_munlockall", //272
5346 "SYS_getpeereid", //273
5347 "SYS_274",
5348 "SYS_275",
5349 "SYS_276",
5350 "SYS_277",
5351 "SYS_278",
5352 "SYS_279",
5353 "SYS_280",
5354 "SYS_getresuid", //281
5355 "SYS_setresuid", //282
5356 "SYS_getresgid", //283
5357 "SYS_setresgid", //284
5358 "SYS_285",
5359 "SYS_mquery", //286
5360 "SYS_closefrom", //287
5361 "SYS_sigaltstack", //288
5362 "SYS_shmget", //289
5363 "SYS_semop", //290
5364 "SYS_stat", //291
5365 "SYS_fstat", //292
5366 "SYS_lstat", //293
5367 "SYS_fhstat", //294
5368 "SYS___semctl", //295
5369 "SYS_shmctl", //296
5370 "SYS_msgctl", //297
5371 "SYS_MAXSYSCALL", //298
5372 //299
5373 //300
5374 };
5375 uint32_t uEAX;
5376 if (!LogIsEnabled())
5377 return;
5378 uEAX = CPUMGetGuestEAX(pVCpu);
5379 switch (uEAX)
5380 {
5381 default:
5382 if (uEAX < RT_ELEMENTS(apsz))
5383 {
5384 uint32_t au32Args[8] = {0};
5385 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5386 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5387 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5388 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5389 }
5390 else
5391 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5392 break;
5393 }
5394}
5395
5396
#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
/**
 * The Dll main entry point (stub).
 *
 * Present only for no-CRT x86 Windows builds where the real CRT startup
 * code is absent; nothing needs initializing, so it just reports success.
 */
bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
{
    (void)hModule; (void)dwReason; (void)pvReserved;
    return true;
}

/**
 * Minimal memcpy replacement for the no-CRT build.
 *
 * Plain byte-by-byte forward copy; regions must not overlap (standard
 * memcpy contract).
 *
 * @returns dst.
 * @param   dst     Destination buffer.
 * @param   src     Source buffer (must not overlap dst).
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t        *pbDst = (uint8_t *)dst;
    const uint8_t  *pbSrc = (const uint8_t *)src;  /* keep const - the original dropped the qualifier */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}

#endif
5415
/* Callback invoked by the recompiler core when the CPU's SMM state changes.
   Intentionally empty: nothing to update on the VBox side here.
   NOTE(review): presumably part of the QEMU CPU interface this file
   implements - confirm against the recompiler core's expectations. */
void cpu_smm_update(CPUState *env)
{
}
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette