VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/EMAll.cpp@62011

Last change on this file since 62011 was 61776, checked in by vboxsync, 9 years ago

CPUM,APIC: Per-CPU APIC CPUID feature bit and MSR_IA32_APICBASE GP mask adjustments.

  • Changed the PDMAPICHLPR3::pfnChangeFeature to pfnSetFeatureLevel, removing the RC and R0 versions.
  • Only use pfnSetFeatureLevel from the APIC constructor to communicate to CPUM the max APIC feature level, not to globally flip CPUID[1].EDX[9].
  • Renamed APIC enmOriginalMode to enmMaxMode, changing its type and that of the corresponding config values to PDMAPICMODE. This makes the above simpler and eliminates two conversion functions. It also makes APICMODE private to the APIC again.
  • Introduced CPUMSetGuestCpuIdPerCpuApicFeature for the per-CPU APIC feature bit management.
  • Introduced CPUMCPUIDLEAF_F_CONTAINS_APIC, which works the same way as CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE and CPUMCPUIDLEAF_F_CONTAINS_APIC_ID. Updated the existing CPU profiles with this.
  • Made the patch manager helper function actually handle CPUMCPUIDLEAF_F_CONTAINS_APIC and CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE (the latter previously relied on CPUMSetGuestCpuIdFeature/CPUMClearGuestCpuIdFeature from CPUMSetGuestCR4).
  • Pushed CPUMSetGuestCpuIdFeature, CPUMGetGuestCpuIdFeature and CPUMClearGuestCpuIdFeature down to ring-3 only (now CPUMR3*). The latter two functions are deprecated.
  • Added a call to CPUMSetGuestCpuIdPerCpuApicFeature from the load function, in case the APIC was disabled by the guest at the time of saving.
  • CPUMSetGuestCpuIdFeature ensures we have an MSR_IA32_APICBASE register when enabling the APIC.
  • CPUMSetGuestCpuIdFeature adjusts the MSR_IA32_APICBASE GP mask when enabling x2APIC, so setting MSR_IA32_APICBASE_EXTD does not trap (see the sketch below the file details).
  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 142.0 KB
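
In rough terms, the per-CPU handling described above means the APIC bit in CPUID[1].EDX (bit 9) now follows each vCPU's own MSR_IA32_APICBASE enable bit, and the APICBASE GP mask is relaxed so that MSR_IA32_APICBASE_EXTD may be set once x2APIC is exposed. The sketch below only illustrates that mapping using architectural bit positions from the Intel SDM; the helper names and signatures are placeholders, not the actual CPUM/APIC entry points.

#include <stdint.h>
#include <stdbool.h>

/* Sketch only: architectural bit positions, placeholder helper names. */
#define SKETCH_APICBASE_EN      (UINT64_C(1) << 11)  /* MSR_IA32_APICBASE.EN (APIC global enable) */
#define SKETCH_APICBASE_EXTD    (UINT64_C(1) << 10)  /* MSR_IA32_APICBASE.EXTD (x2APIC enable)    */
#define SKETCH_CPUID1_EDX_APIC  (UINT32_C(1) <<  9)  /* CPUID[1].EDX[9] (APIC on chip)            */

/* Per-vCPU view: CPUID[1].EDX[9] tracks this vCPU's APIC enable bit. */
static uint32_t sketchAdjustCpuid1Edx(uint32_t uEdx, uint64_t uApicBaseMsr)
{
    if (uApicBaseMsr & SKETCH_APICBASE_EN)
        return uEdx | SKETCH_CPUID1_EDX_APIC;
    return uEdx & ~SKETCH_CPUID1_EDX_APIC;
}

/* GP mask: once x2APIC is exposed, a guest write that sets EXTD must not #GP. */
static uint64_t sketchAdjustApicBaseGpMask(uint64_t fGpMask, bool fX2ApicExposed)
{
    return fX2ApicExposed ? (fGpMask & ~SKETCH_APICBASE_EXTD) : (fGpMask | SKETCH_APICBASE_EXTD);
}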
 
1/* $Id: EMAll.cpp 61776 2016-06-20 23:25:06Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor(/Manager) - All contexts
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define VBOX_WITH_IEM
23#define LOG_GROUP LOG_GROUP_EM
24#include <VBox/vmm/em.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/patm.h>
28#include <VBox/vmm/csam.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_IEM
31# include <VBox/vmm/iem.h>
32#endif
33#include <VBox/vmm/iom.h>
34#include <VBox/vmm/stam.h>
35#include "EMInternal.h"
36#include <VBox/vmm/vm.h>
37#include <VBox/vmm/vmm.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/tm.h>
40#include <VBox/vmm/pdmapi.h>
41#include <VBox/param.h>
42#include <VBox/err.h>
43#include <VBox/dis.h>
44#include <VBox/disopcode.h>
45#include <VBox/log.h>
46#include "internal/pgm.h"
47#include <iprt/assert.h>
48#include <iprt/asm.h>
49#include <iprt/string.h>
50
51#ifdef VBOX_WITH_IEM
52//# define VBOX_COMPARE_IEM_AND_EM /* debugging... */
53//# define VBOX_SAME_AS_EM
54//# define VBOX_COMPARE_IEM_LAST
55#endif
56
57#ifdef VBOX_WITH_RAW_RING1
58# define EM_EMULATE_SMSW
59#endif
60
61
62/*********************************************************************************************************************************
63* Defined Constants And Macros *
64*********************************************************************************************************************************/
65/** @def EM_ASSERT_FAULT_RETURN
66 * Safety check.
67 *
68 * Could in theory misfire on a cross page boundary access...
69 *
70 * Currently disabled because the CSAM (+ PATM) patch monitoring occasionally
71 * turns up an alias page instead of the original faulting one and annoying the
72 * heck out of anyone running a debug build. See @bugref{2609} and @bugref{1931}.
73 */
74#if 0
75# define EM_ASSERT_FAULT_RETURN(expr, rc) AssertReturn(expr, rc)
76#else
77# define EM_ASSERT_FAULT_RETURN(expr, rc) do { } while (0)
78#endif
79
80
81/*********************************************************************************************************************************
82* Internal Functions *
83*********************************************************************************************************************************/
84#if !defined(VBOX_WITH_IEM) || defined(VBOX_COMPARE_IEM_AND_EM)
85DECLINLINE(VBOXSTRICTRC) emInterpretInstructionCPUOuter(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
86 RTGCPTR pvFault, EMCODETYPE enmCodeType, uint32_t *pcbSize);
87#endif
88
89
90/*********************************************************************************************************************************
91* Global Variables *
92*********************************************************************************************************************************/
93#ifdef VBOX_COMPARE_IEM_AND_EM
94static const uint32_t g_fInterestingFFs = VMCPU_FF_TO_R3
95 | VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE | VMCPU_FF_INHIBIT_INTERRUPTS
96 | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT
97 | VMCPU_FF_TLB_FLUSH | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL;
98static uint32_t g_fIncomingFFs;
99static CPUMCTX g_IncomingCtx;
100static bool g_fIgnoreRaxRdx = false;
101
102static uint32_t g_fEmFFs;
103static CPUMCTX g_EmCtx;
104static uint8_t g_abEmWrote[256];
105static size_t g_cbEmWrote;
106
107static uint32_t g_fIemFFs;
108static CPUMCTX g_IemCtx;
109extern uint8_t g_abIemWrote[256];
110#if defined(VBOX_COMPARE_IEM_FIRST) || defined(VBOX_COMPARE_IEM_LAST)
111extern size_t g_cbIemWrote;
112#else
113static size_t g_cbIemWrote;
114#endif
115#endif
116
117
118/**
119 * Get the current execution manager status.
120 *
121 * @returns Current status.
122 * @param pVCpu The cross context virtual CPU structure.
123 */
124VMM_INT_DECL(EMSTATE) EMGetState(PVMCPU pVCpu)
125{
126 return pVCpu->em.s.enmState;
127}
128
129
130/**
131 * Sets the current execution manager status. (use only when you know what you're doing!)
132 *
133 * @param pVCpu The cross context virtual CPU structure.
134 * @param enmNewState The new state, EMSTATE_WAIT_SIPI or EMSTATE_HALTED.
135 */
136VMM_INT_DECL(void) EMSetState(PVMCPU pVCpu, EMSTATE enmNewState)
137{
138 /* Only allowed combination: */
139 Assert(pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI && enmNewState == EMSTATE_HALTED);
140 pVCpu->em.s.enmState = enmNewState;
141}
142
143
144/**
145 * Sets the PC for which interrupts should be inhibited.
146 *
147 * @param pVCpu The cross context virtual CPU structure.
148 * @param PC The PC.
149 */
150VMMDECL(void) EMSetInhibitInterruptsPC(PVMCPU pVCpu, RTGCUINTPTR PC)
151{
152 pVCpu->em.s.GCPtrInhibitInterrupts = PC;
153 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
154}
155
156
157/**
158 * Gets the PC for which interrupts should be inhibited.
159 *
160 * There are a few instructions which inhibits or delays interrupts
161 * for the instruction following them. These instructions are:
162 * - STI
163 * - MOV SS, r/m16
164 * - POP SS
165 *
166 * @returns The PC for which interrupts should be inhibited.
167 * @param pVCpu The cross context virtual CPU structure.
168 *
169 */
170VMMDECL(RTGCUINTPTR) EMGetInhibitInterruptsPC(PVMCPU pVCpu)
171{
172 return pVCpu->em.s.GCPtrInhibitInterrupts;
173}
174
175
176/**
177 * Prepare an MWAIT - essentials of the MONITOR instruction.
178 *
179 * @returns VINF_SUCCESS
180 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
181 * @param rax The content of RAX.
182 * @param rcx The content of RCX.
183 * @param rdx The content of RDX.
184 * @param GCPhys The physical address corresponding to rax.
185 */
186VMM_INT_DECL(int) EMMonitorWaitPrepare(PVMCPU pVCpu, uint64_t rax, uint64_t rcx, uint64_t rdx, RTGCPHYS GCPhys)
187{
188 pVCpu->em.s.MWait.uMonitorRAX = rax;
189 pVCpu->em.s.MWait.uMonitorRCX = rcx;
190 pVCpu->em.s.MWait.uMonitorRDX = rdx;
191 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_MONITOR_ACTIVE;
192 /** @todo Make use of GCPhys. */
193 NOREF(GCPhys);
194 /** @todo Complete MONITOR implementation. */
195 return VINF_SUCCESS;
196}
197
198
199/**
200 * Performs an MWAIT.
201 *
202 * @returns VINF_SUCCESS
203 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
204 * @param rax The content of RAX.
205 * @param rcx The content of RCX.
206 */
207VMM_INT_DECL(int) EMMonitorWaitPerform(PVMCPU pVCpu, uint64_t rax, uint64_t rcx)
208{
209 pVCpu->em.s.MWait.uMWaitRAX = rax;
210 pVCpu->em.s.MWait.uMWaitRCX = rcx;
211 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_ACTIVE;
212 if (rcx)
213 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_BREAKIRQIF0;
214 else
215 pVCpu->em.s.MWait.fWait &= ~EMMWAIT_FLAG_BREAKIRQIF0;
216 /** @todo not completely correct?? */
217 return VINF_EM_HALT;
218}
219
220
221
222/**
223 * Determine if we should continue execution in HM after encountering an mwait
224 * instruction.
225 *
226 * Clears MWAIT flags if returning @c true.
227 *
228 * @returns true if we should continue, false if we should halt.
229 * @param pVCpu The cross context virtual CPU structure.
230 * @param pCtx Current CPU context.
231 */
232VMM_INT_DECL(bool) EMMonitorWaitShouldContinue(PVMCPU pVCpu, PCPUMCTX pCtx)
233{
234 if ( pCtx->eflags.Bits.u1IF
235 || ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
236 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0)) )
237 {
238 if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
239 {
240 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
241 return true;
242 }
243 }
244
245 return false;
246}
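/*
 * Illustrative sketch of how an HM exit handler might chain the MWAIT helpers
 * above.  Only the EM* calls are real APIs from this file; the wrapper name
 * and exact call site are assumptions.
 */
static VBOXSTRICTRC sketchHandleMWaitExit(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PCPUMCTX pCtx)
{
    /* Record the MWAIT hints; EMMonitorWaitPerform returns VINF_EM_HALT. */
    VBOXSTRICTRC rcStrict = EMMonitorWaitPerform(pVCpu, pRegFrame->rax, pRegFrame->rcx);
    /* If an interrupt is already pending (and allowed to break the wait),
       keep executing instead of halting; this also clears the MWAIT flags. */
    if (   rcStrict == VINF_EM_HALT
        && EMMonitorWaitShouldContinue(pVCpu, pCtx))
        rcStrict = VINF_SUCCESS;
    return rcStrict;
}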
247
248
249/**
250 * Determine if we should continue execution in HM after encountering a hlt
251 * instruction.
252 *
253 * @returns true if we should continue, false if we should halt.
254 * @param pVCpu The cross context virtual CPU structure.
255 * @param pCtx Current CPU context.
256 */
257VMM_INT_DECL(bool) EMShouldContinueAfterHalt(PVMCPU pVCpu, PCPUMCTX pCtx)
258{
259 if (pCtx->eflags.Bits.u1IF)
260 return !!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC));
261 return false;
262}
263
264
265/**
266 * Locks REM execution to a single VCPU.
267 *
268 * @param pVM The cross context VM structure.
269 */
270VMMDECL(void) EMRemLock(PVM pVM)
271{
272#ifdef VBOX_WITH_REM
273 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
274 return; /* early init */
275
276 Assert(!PGMIsLockOwner(pVM));
277 Assert(!IOMIsLockWriteOwner(pVM));
278 int rc = PDMCritSectEnter(&pVM->em.s.CritSectREM, VERR_SEM_BUSY);
279 AssertRCSuccess(rc);
280#endif
281}
282
283
284/**
285 * Unlocks REM execution
286 *
287 * @param pVM The cross context VM structure.
288 */
289VMMDECL(void) EMRemUnlock(PVM pVM)
290{
291#ifdef VBOX_WITH_REM
292 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
293 return; /* early init */
294
295 PDMCritSectLeave(&pVM->em.s.CritSectREM);
296#endif
297}
298
299
300/**
301 * Check if this VCPU currently owns the REM lock.
302 *
303 * @returns bool owner/not owner
304 * @param pVM The cross context VM structure.
305 */
306VMMDECL(bool) EMRemIsLockOwner(PVM pVM)
307{
308#ifdef VBOX_WITH_REM
309 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
310 return true; /* early init */
311
312 return PDMCritSectIsOwner(&pVM->em.s.CritSectREM);
313#else
314 return true;
315#endif
316}
317
318
319/**
320 * Try to acquire the REM lock.
321 *
322 * @returns VBox status code
323 * @param pVM The cross context VM structure.
324 */
325VMM_INT_DECL(int) EMRemTryLock(PVM pVM)
326{
327#ifdef VBOX_WITH_REM
328 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
329 return VINF_SUCCESS; /* early init */
330
331 return PDMCritSectTryEnter(&pVM->em.s.CritSectREM);
332#else
333 return VINF_SUCCESS;
334#endif
335}
336
337
338/**
339 * @callback_method_impl{FNDISREADBYTES}
340 */
341static DECLCALLBACK(int) emReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
342{
343 PVMCPU pVCpu = (PVMCPU)pDis->pvUser;
344#if defined(IN_RC) || defined(IN_RING3)
345 PVM pVM = pVCpu->CTX_SUFF(pVM);
346#endif
347 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
348 int rc;
349
350 /*
351 * Figure how much we can or must read.
352 */
353 size_t cbToRead = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);
354 if (cbToRead > cbMaxRead)
355 cbToRead = cbMaxRead;
356 else if (cbToRead < cbMinRead)
357 cbToRead = cbMinRead;
358
359#if defined(VBOX_WITH_RAW_MODE) && (defined(IN_RC) || defined(IN_RING3))
360 /*
361 * We might be called upon to interpret an instruction in a patch.
362 */
363 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), uSrcAddr))
364 {
365# ifdef IN_RC
366 memcpy(&pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
367# else
368 memcpy(&pDis->abInstr[offInstr], PATMR3GCPtrToHCPtr(pVCpu->CTX_SUFF(pVM), uSrcAddr), cbToRead);
369# endif
370 rc = VINF_SUCCESS;
371 }
372 else
373#endif
374 {
375# ifdef IN_RC
376 /*
377 * Try access it thru the shadow page tables first. Fall back on the
378 * slower PGM method if it fails because the TLB or page table was
379 * modified recently.
380 */
381 rc = MMGCRamRead(pVCpu->pVMRC, &pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
382 if (rc == VERR_ACCESS_DENIED && cbToRead > cbMinRead)
383 {
384 cbToRead = cbMinRead;
385 rc = MMGCRamRead(pVCpu->pVMRC, &pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
386 }
387 if (rc == VERR_ACCESS_DENIED)
388#endif
389 {
390 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
391 if (RT_FAILURE(rc))
392 {
393 if (cbToRead > cbMinRead)
394 {
395 cbToRead = cbMinRead;
396 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
397 }
398 if (RT_FAILURE(rc))
399 {
400#ifndef IN_RC
401 /*
402 * If we fail to find the page via the guest's page tables
403 * we invalidate the page in the host TLB (pertaining to
404 * the guest in the NestedPaging case). See @bugref{6043}.
405 */
406 if (rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT)
407 {
408 HMInvalidatePage(pVCpu, uSrcAddr);
409 if (((uSrcAddr + cbToRead - 1) >> PAGE_SHIFT) != (uSrcAddr >> PAGE_SHIFT))
410 HMInvalidatePage(pVCpu, uSrcAddr + cbToRead - 1);
411 }
412#endif
413 }
414 }
415 }
416 }
417
418 pDis->cbCachedInstr = offInstr + (uint8_t)cbToRead;
419 return rc;
420}
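/*
 * Worked example for the clamping in emReadBytes above (assumed numbers):
 * with the source address at page offset 0xffe, cbMinRead=1 and cbMaxRead=16,
 * only 2 bytes are left in the page, so cbToRead = 2.  At offset 0xfff with
 * cbMinRead=2, cbToRead starts at 1 and is bumped up to cbMinRead, so the
 * read deliberately crosses the page boundary and the PGM fallback paths
 * either handle it or fail gracefully.
 */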
421
422
423DECLINLINE(int) emDisCoreOne(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, RTGCUINTPTR InstrGC, uint32_t *pOpsize)
424{
425 NOREF(pVM);
426 return DISInstrWithReader(InstrGC, (DISCPUMODE)pDis->uCpuMode, emReadBytes, pVCpu, pDis, pOpsize);
427}
428
429
430/**
431 * Disassembles the current instruction.
432 *
433 * @returns VBox status code, see SELMToFlatEx and EMInterpretDisasOneEx for
434 * details.
435 *
436 * @param pVM The cross context VM structure.
437 * @param pVCpu The cross context virtual CPU structure.
438 * @param pDis Where to return the parsed instruction info.
439 * @param pcbInstr Where to return the instruction size. (optional)
440 */
441VMM_INT_DECL(int) EMInterpretDisasCurrent(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, unsigned *pcbInstr)
442{
443 PCPUMCTXCORE pCtxCore = CPUMCTX2CORE(CPUMQueryGuestCtxPtr(pVCpu));
444 RTGCPTR GCPtrInstr;
445#if 0
446 int rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pCtxCore, pCtxCore->rip, 0, &GCPtrInstr);
447#else
448/** @todo Get the CPU mode as well while we're at it! */
449 int rc = SELMValidateAndConvertCSAddr(pVCpu, pCtxCore->eflags, pCtxCore->ss.Sel, pCtxCore->cs.Sel, &pCtxCore->cs,
450 pCtxCore->rip, &GCPtrInstr);
451#endif
452 if (RT_FAILURE(rc))
453 {
454 Log(("EMInterpretDisasOne: Failed to convert %RTsel:%RGv (cpl=%d) - rc=%Rrc !!\n",
455 pCtxCore->cs.Sel, (RTGCPTR)pCtxCore->rip, pCtxCore->ss.Sel & X86_SEL_RPL, rc));
456 return rc;
457 }
458 return EMInterpretDisasOneEx(pVM, pVCpu, (RTGCUINTPTR)GCPtrInstr, pCtxCore, pDis, pcbInstr);
459}
460
461
462/**
463 * Disassembles one instruction.
464 *
465 * This is used by internally by the interpreter and by trap/access handlers.
466 *
467 * @returns VBox status code.
468 *
469 * @param pVM The cross context VM structure.
470 * @param pVCpu The cross context virtual CPU structure.
471 * @param GCPtrInstr The flat address of the instruction.
472 * @param pCtxCore The context core (used to determine the cpu mode).
473 * @param pDis Where to return the parsed instruction info.
474 * @param pcbInstr Where to return the instruction size. (optional)
475 */
476VMM_INT_DECL(int) EMInterpretDisasOneEx(PVM pVM, PVMCPU pVCpu, RTGCUINTPTR GCPtrInstr, PCCPUMCTXCORE pCtxCore,
477 PDISCPUSTATE pDis, unsigned *pcbInstr)
478{
479 NOREF(pVM);
480 Assert(pCtxCore == CPUMGetGuestCtxCore(pVCpu)); NOREF(pCtxCore);
481 DISCPUMODE enmCpuMode = CPUMGetGuestDisMode(pVCpu);
482 /** @todo Deal with too long instruction (=> \#GP), opcode read errors (=>
483 * \#PF, \#GP, \#??), undefined opcodes (=> \#UD), and such. */
484 int rc = DISInstrWithReader(GCPtrInstr, enmCpuMode, emReadBytes, pVCpu, pDis, pcbInstr);
485 if (RT_SUCCESS(rc))
486 return VINF_SUCCESS;
487 AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("DISCoreOne failed to GCPtrInstr=%RGv rc=%Rrc\n", GCPtrInstr, rc));
488 return rc;
489}
490
491
492#if defined(VBOX_COMPARE_IEM_FIRST) || defined(VBOX_COMPARE_IEM_LAST)
493static void emCompareWithIem(PVMCPU pVCpu, PCCPUMCTX pEmCtx, PCCPUMCTX pIemCtx,
494 VBOXSTRICTRC rcEm, VBOXSTRICTRC rcIem,
495 uint32_t cbEm, uint32_t cbIem)
496{
497 /* Quick compare. */
498 if ( rcEm == rcIem
499 && cbEm == cbIem
500 && g_cbEmWrote == g_cbIemWrote
501 && memcmp(g_abIemWrote, g_abEmWrote, g_cbIemWrote) == 0
502 && memcmp(pIemCtx, pEmCtx, sizeof(*pIemCtx)) == 0
503 && (g_fEmFFs & g_fInterestingFFs) == (g_fIemFFs & g_fInterestingFFs)
504 )
505 return;
506
507 /* Report exact differences. */
508 RTLogPrintf("! EM and IEM differs at %04x:%08RGv !\n", g_IncomingCtx.cs.Sel, g_IncomingCtx.rip);
509 if (rcEm != rcIem)
510 RTLogPrintf(" * rcIem=%Rrc rcEm=%Rrc\n", VBOXSTRICTRC_VAL(rcIem), VBOXSTRICTRC_VAL(rcEm));
511 else if (cbEm != cbIem)
512 RTLogPrintf(" * cbIem=%#x cbEm=%#x\n", cbIem, cbEm);
513
514 if (RT_SUCCESS(rcEm) && RT_SUCCESS(rcIem))
515 {
516 if (g_cbIemWrote != g_cbEmWrote)
517 RTLogPrintf("!! g_cbIemWrote=%#x g_cbEmWrote=%#x\n", g_cbIemWrote, g_cbEmWrote);
518 else if (memcmp(g_abIemWrote, g_abEmWrote, g_cbIemWrote))
519 {
520 RTLogPrintf("!! IemWrote %.*Rhxs\n", RT_MIN(RT_MAX(1, g_cbIemWrote), 64), g_abIemWrote);
521 RTLogPrintf("!! EemWrote %.*Rhxs\n", RT_MIN(RT_MAX(1, g_cbIemWrote), 64), g_abIemWrote);
522 }
523
524 if ((g_fEmFFs & g_fInterestingFFs) != (g_fIemFFs & g_fInterestingFFs))
525 RTLogPrintf("!! g_fIemFFs=%#x g_fEmFFs=%#x (diff=%#x)\n", g_fIemFFs & g_fInterestingFFs,
526 g_fEmFFs & g_fInterestingFFs, (g_fIemFFs ^ g_fEmFFs) & g_fInterestingFFs);
527
528# define CHECK_FIELD(a_Field) \
529 do \
530 { \
531 if (pEmCtx->a_Field != pIemCtx->a_Field) \
532 { \
533 switch (sizeof(pEmCtx->a_Field)) \
534 { \
535 case 1: RTLogPrintf("!! %8s differs - iem=%02x - em=%02x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
536 case 2: RTLogPrintf("!! %8s differs - iem=%04x - em=%04x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
537 case 4: RTLogPrintf("!! %8s differs - iem=%08x - em=%08x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
538 case 8: RTLogPrintf("!! %8s differs - iem=%016llx - em=%016llx\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); break; \
539 default: RTLogPrintf("!! %8s differs\n", #a_Field); break; \
540 } \
541 cDiffs++; \
542 } \
543 } while (0)
544
545# define CHECK_BIT_FIELD(a_Field) \
546 do \
547 { \
548 if (pEmCtx->a_Field != pIemCtx->a_Field) \
549 { \
550 RTLogPrintf("!! %8s differs - iem=%02x - em=%02x\n", #a_Field, pIemCtx->a_Field, pEmCtx->a_Field); \
551 cDiffs++; \
552 } \
553 } while (0)
554
555# define CHECK_SEL(a_Sel) \
556 do \
557 { \
558 CHECK_FIELD(a_Sel.Sel); \
559 CHECK_FIELD(a_Sel.Attr.u); \
560 CHECK_FIELD(a_Sel.u64Base); \
561 CHECK_FIELD(a_Sel.u32Limit); \
562 CHECK_FIELD(a_Sel.fFlags); \
563 } while (0)
564
565 unsigned cDiffs = 0;
566 if (memcmp(&pEmCtx->fpu, &pIemCtx->fpu, sizeof(pIemCtx->fpu)))
567 {
568 RTLogPrintf(" the FPU state differs\n");
569 cDiffs++;
570 CHECK_FIELD(fpu.FCW);
571 CHECK_FIELD(fpu.FSW);
572 CHECK_FIELD(fpu.FTW);
573 CHECK_FIELD(fpu.FOP);
574 CHECK_FIELD(fpu.FPUIP);
575 CHECK_FIELD(fpu.CS);
576 CHECK_FIELD(fpu.Rsrvd1);
577 CHECK_FIELD(fpu.FPUDP);
578 CHECK_FIELD(fpu.DS);
579 CHECK_FIELD(fpu.Rsrvd2);
580 CHECK_FIELD(fpu.MXCSR);
581 CHECK_FIELD(fpu.MXCSR_MASK);
582 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
583 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
584 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
585 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
586 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
587 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
588 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
589 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
590 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
591 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
592 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
593 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
594 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
595 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
596 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
597 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
598 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
599 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
600 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
601 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
602 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
603 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
604 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
605 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
606 for (unsigned i = 0; i < RT_ELEMENTS(pEmCtx->fpu.au32RsrvdRest); i++)
607 CHECK_FIELD(fpu.au32RsrvdRest[i]);
608 }
609 CHECK_FIELD(rip);
610 if (pEmCtx->rflags.u != pIemCtx->rflags.u)
611 {
612 RTLogPrintf("!! rflags differs - iem=%08llx em=%08llx\n", pIemCtx->rflags.u, pEmCtx->rflags.u);
613 CHECK_BIT_FIELD(rflags.Bits.u1CF);
614 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
615 CHECK_BIT_FIELD(rflags.Bits.u1PF);
616 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
617 CHECK_BIT_FIELD(rflags.Bits.u1AF);
618 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
619 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
620 CHECK_BIT_FIELD(rflags.Bits.u1SF);
621 CHECK_BIT_FIELD(rflags.Bits.u1TF);
622 CHECK_BIT_FIELD(rflags.Bits.u1IF);
623 CHECK_BIT_FIELD(rflags.Bits.u1DF);
624 CHECK_BIT_FIELD(rflags.Bits.u1OF);
625 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
626 CHECK_BIT_FIELD(rflags.Bits.u1NT);
627 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
628 CHECK_BIT_FIELD(rflags.Bits.u1RF);
629 CHECK_BIT_FIELD(rflags.Bits.u1VM);
630 CHECK_BIT_FIELD(rflags.Bits.u1AC);
631 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
632 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
633 CHECK_BIT_FIELD(rflags.Bits.u1ID);
634 }
635
636 if (!g_fIgnoreRaxRdx)
637 CHECK_FIELD(rax);
638 CHECK_FIELD(rcx);
639 if (!g_fIgnoreRaxRdx)
640 CHECK_FIELD(rdx);
641 CHECK_FIELD(rbx);
642 CHECK_FIELD(rsp);
643 CHECK_FIELD(rbp);
644 CHECK_FIELD(rsi);
645 CHECK_FIELD(rdi);
646 CHECK_FIELD(r8);
647 CHECK_FIELD(r9);
648 CHECK_FIELD(r10);
649 CHECK_FIELD(r11);
650 CHECK_FIELD(r12);
651 CHECK_FIELD(r13);
652 CHECK_SEL(cs);
653 CHECK_SEL(ss);
654 CHECK_SEL(ds);
655 CHECK_SEL(es);
656 CHECK_SEL(fs);
657 CHECK_SEL(gs);
658 CHECK_FIELD(cr0);
659 CHECK_FIELD(cr2);
660 CHECK_FIELD(cr3);
661 CHECK_FIELD(cr4);
662 CHECK_FIELD(dr[0]);
663 CHECK_FIELD(dr[1]);
664 CHECK_FIELD(dr[2]);
665 CHECK_FIELD(dr[3]);
666 CHECK_FIELD(dr[6]);
667 CHECK_FIELD(dr[7]);
668 CHECK_FIELD(gdtr.cbGdt);
669 CHECK_FIELD(gdtr.pGdt);
670 CHECK_FIELD(idtr.cbIdt);
671 CHECK_FIELD(idtr.pIdt);
672 CHECK_SEL(ldtr);
673 CHECK_SEL(tr);
674 CHECK_FIELD(SysEnter.cs);
675 CHECK_FIELD(SysEnter.eip);
676 CHECK_FIELD(SysEnter.esp);
677 CHECK_FIELD(msrEFER);
678 CHECK_FIELD(msrSTAR);
679 CHECK_FIELD(msrPAT);
680 CHECK_FIELD(msrLSTAR);
681 CHECK_FIELD(msrCSTAR);
682 CHECK_FIELD(msrSFMASK);
683 CHECK_FIELD(msrKERNELGSBASE);
684
685# undef CHECK_FIELD
686# undef CHECK_BIT_FIELD
687 }
688}
689#endif /* VBOX_COMPARE_IEM_AND_EM */
690
691
692/**
693 * Interprets the current instruction.
694 *
695 * @returns VBox status code.
696 * @retval VINF_* Scheduling instructions.
697 * @retval VERR_EM_INTERPRETER Something we can't cope with.
698 * @retval VERR_* Fatal errors.
699 *
700 * @param pVCpu The cross context virtual CPU structure.
701 * @param pRegFrame The register frame.
702 * Updates the EIP if an instruction was executed successfully.
703 * @param pvFault The fault address (CR2).
704 *
705 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
706 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
707 * to worry about e.g. invalid modrm combinations (!)
708 */
709VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstruction(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
710{
711 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
712 LogFlow(("EMInterpretInstruction %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
713#ifdef VBOX_WITH_IEM
714 NOREF(pvFault);
715
716# ifdef VBOX_COMPARE_IEM_AND_EM
717 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
718 g_IncomingCtx = *pCtx;
719 g_fIncomingFFs = pVCpu->fLocalForcedActions;
720 g_cbEmWrote = g_cbIemWrote = 0;
721
722# ifdef VBOX_COMPARE_IEM_FIRST
723 /* IEM */
724 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
725 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
726 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
727 rcIem = VERR_EM_INTERPRETER;
728 g_IemCtx = *pCtx;
729 g_fIemFFs = pVCpu->fLocalForcedActions;
730 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
731 *pCtx = g_IncomingCtx;
732# endif
733
734 /* EM */
735 RTGCPTR pbCode;
736 VBOXSTRICTRC rcEm = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
737 if (RT_SUCCESS(rcEm))
738 {
739 uint32_t cbOp;
740 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
741 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
742 rcEm = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
743 if (RT_SUCCESS(rcEm))
744 {
745 Assert(cbOp == pDis->cbInstr);
746 uint32_t cbIgnored;
747 rcEm = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, &cbIgnored);
748 if (RT_SUCCESS(rcEm))
749 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
750
751 }
752 rcEm = VERR_EM_INTERPRETER;
753 }
754 else
755 rcEm = VERR_EM_INTERPRETER;
756# ifdef VBOX_SAME_AS_EM
757 if (rcEm == VERR_EM_INTERPRETER)
758 {
759 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rcEm)));
760 return rcEm;
761 }
762# endif
763 g_EmCtx = *pCtx;
764 g_fEmFFs = pVCpu->fLocalForcedActions;
765 VBOXSTRICTRC rc = rcEm;
766
767# ifdef VBOX_COMPARE_IEM_LAST
768 /* IEM */
769 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
770 *pCtx = g_IncomingCtx;
771 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
772 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
773 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
774 rcIem = VERR_EM_INTERPRETER;
775 g_IemCtx = *pCtx;
776 g_fIemFFs = pVCpu->fLocalForcedActions;
777 rc = rcIem;
778# endif
779
780# if defined(VBOX_COMPARE_IEM_LAST) || defined(VBOX_COMPARE_IEM_FIRST)
781 emCompareWithIem(pVCpu, &g_EmCtx, &g_IemCtx, rcEm, rcIem, 0, 0);
782# endif
783
784# else
785 VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
786 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
787 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
788 rc = VERR_EM_INTERPRETER;
789# endif
790 if (rc != VINF_SUCCESS)
791 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
792
793 return rc;
794#else
795 RTGCPTR pbCode;
796 VBOXSTRICTRC rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
797 if (RT_SUCCESS(rc))
798 {
799 uint32_t cbOp;
800 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
801 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
802 rc = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
803 if (RT_SUCCESS(rc))
804 {
805 Assert(cbOp == pDis->cbInstr);
806 uint32_t cbIgnored;
807 rc = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, &cbIgnored);
808 if (RT_SUCCESS(rc))
809 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
810
811 return rc;
812 }
813 }
814 return VERR_EM_INTERPRETER;
815#endif
816}
817
818
819/**
820 * Interprets the current instruction.
821 *
822 * @returns VBox status code.
823 * @retval VINF_* Scheduling instructions.
824 * @retval VERR_EM_INTERPRETER Something we can't cope with.
825 * @retval VERR_* Fatal errors.
826 *
827 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
828 * @param pRegFrame The register frame.
829 * Updates the EIP if an instruction was executed successfully.
830 * @param pvFault The fault address (CR2).
831 * @param pcbWritten Size of the write (if applicable).
832 *
833 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
834 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
835 * to worry about e.g. invalid modrm combinations (!)
836 */
837VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionEx(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbWritten)
838{
839 LogFlow(("EMInterpretInstructionEx %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
840 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
841#ifdef VBOX_WITH_IEM
842 NOREF(pvFault);
843
844# ifdef VBOX_COMPARE_IEM_AND_EM
845 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
846 g_IncomingCtx = *pCtx;
847 g_fIncomingFFs = pVCpu->fLocalForcedActions;
848 g_cbEmWrote = g_cbIemWrote = 0;
849
850# ifdef VBOX_COMPARE_IEM_FIRST
851 /* IEM */
852 uint32_t cbIemWritten = 0;
853 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, &cbIemWritten);
854 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
855 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
856 rcIem = VERR_EM_INTERPRETER;
857 g_IemCtx = *pCtx;
858 g_fIemFFs = pVCpu->fLocalForcedActions;
859 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
860 *pCtx = g_IncomingCtx;
861# endif
862
863 /* EM */
864 uint32_t cbEmWritten = 0;
865 RTGCPTR pbCode;
866 VBOXSTRICTRC rcEm = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
867 if (RT_SUCCESS(rcEm))
868 {
869 uint32_t cbOp;
870 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
871 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
872 rcEm = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
873 if (RT_SUCCESS(rcEm))
874 {
875 Assert(cbOp == pDis->cbInstr);
876 rcEm = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, &cbEmWritten);
877 if (RT_SUCCESS(rcEm))
878 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
879
880 }
881 else
882 rcEm = VERR_EM_INTERPRETER;
883 }
884 else
885 rcEm = VERR_EM_INTERPRETER;
886# ifdef VBOX_SAME_AS_EM
887 if (rcEm == VERR_EM_INTERPRETER)
888 {
889 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rcEm)));
890 return rcEm;
891 }
892# endif
893 g_EmCtx = *pCtx;
894 g_fEmFFs = pVCpu->fLocalForcedActions;
895 *pcbWritten = cbEmWritten;
896 VBOXSTRICTRC rc = rcEm;
897
898# ifdef VBOX_COMPARE_IEM_LAST
899 /* IEM */
900 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
901 *pCtx = g_IncomingCtx;
902 uint32_t cbIemWritten = 0;
903 VBOXSTRICTRC rcIem = IEMExecOneBypassEx(pVCpu, pRegFrame, &cbIemWritten);
904 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
905 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
906 rcIem = VERR_EM_INTERPRETER;
907 g_IemCtx = *pCtx;
908 g_fIemFFs = pVCpu->fLocalForcedActions;
909 *pcbWritten = cbIemWritten;
910 rc = rcIem;
911# endif
912
913# if defined(VBOX_COMPARE_IEM_LAST) || defined(VBOX_COMPARE_IEM_FIRST)
914 emCompareWithIem(pVCpu, &g_EmCtx, &g_IemCtx, rcEm, rcIem, cbEmWritten, cbIemWritten);
915# endif
916
917# else
918 VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, pRegFrame, pcbWritten);
919 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
920 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
921 rc = VERR_EM_INTERPRETER;
922# endif
923 if (rc != VINF_SUCCESS)
924 Log(("EMInterpretInstructionEx: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
925
926 return rc;
927#else
928 RTGCPTR pbCode;
929 VBOXSTRICTRC rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
930 if (RT_SUCCESS(rc))
931 {
932 uint32_t cbOp;
933 PDISCPUSTATE pDis = &pVCpu->em.s.DisState;
934 pDis->uCpuMode = CPUMGetGuestDisMode(pVCpu);
935 rc = emDisCoreOne(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, (RTGCUINTPTR)pbCode, &cbOp);
936 if (RT_SUCCESS(rc))
937 {
938 Assert(cbOp == pDis->cbInstr);
939 rc = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_SUPERVISOR, pcbWritten);
940 if (RT_SUCCESS(rc))
941 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
942
943 return rc;
944 }
945 }
946 return VERR_EM_INTERPRETER;
947#endif
948}
949
950
951/**
952 * Interprets the current instruction using the supplied DISCPUSTATE structure.
953 *
954 * IP/EIP/RIP *IS* updated!
955 *
956 * @returns VBox strict status code.
957 * @retval VINF_* Scheduling instructions. When these are returned, it
958 * starts to get a bit tricky to know whether code was
959 * executed or not... We'll address this when it becomes a problem.
960 * @retval VERR_EM_INTERPRETER Something we can't cope with.
961 * @retval VERR_* Fatal errors.
962 *
963 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
964 * @param pDis The disassembler cpu state for the instruction to be
965 * interpreted.
966 * @param pRegFrame The register frame. IP/EIP/RIP *IS* changed!
967 * @param pvFault The fault address (CR2).
968 * @param enmCodeType Code type (user/supervisor)
969 *
970 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
971 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
972 * to worry about e.g. invalid modrm combinations (!)
973 *
974 * @todo At this time we do NOT check if the instruction overwrites vital information.
975 * Make sure this can't happen!! (will add some assertions/checks later)
976 */
977VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionDisasState(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
978 RTGCPTR pvFault, EMCODETYPE enmCodeType)
979{
980 LogFlow(("EMInterpretInstructionDisasState %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
981 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
982#ifdef VBOX_WITH_IEM
983 NOREF(pDis); NOREF(pvFault); NOREF(enmCodeType);
984
985# ifdef VBOX_COMPARE_IEM_AND_EM
986 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
987 g_IncomingCtx = *pCtx;
988 g_fIncomingFFs = pVCpu->fLocalForcedActions;
989 g_cbEmWrote = g_cbIemWrote = 0;
990
991# ifdef VBOX_COMPARE_IEM_FIRST
992 VBOXSTRICTRC rcIem = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
993 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
994 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
995 rcIem = VERR_EM_INTERPRETER;
996 g_IemCtx = *pCtx;
997 g_fIemFFs = pVCpu->fLocalForcedActions;
998 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
999 *pCtx = g_IncomingCtx;
1000# endif
1001
1002 /* EM */
1003 uint32_t cbIgnored;
1004 VBOXSTRICTRC rcEm = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, enmCodeType, &cbIgnored);
1005 if (RT_SUCCESS(rcEm))
1006 pRegFrame->rip += pDis->cbInstr; /* Move on to the next instruction. */
1007# ifdef VBOX_SAME_AS_EM
1008 if (rcEm == VERR_EM_INTERPRETER)
1009 {
1010 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rcEm)));
1011 return rcEm;
1012 }
1013# endif
1014 g_EmCtx = *pCtx;
1015 g_fEmFFs = pVCpu->fLocalForcedActions;
1016 VBOXSTRICTRC rc = rcEm;
1017
1018# ifdef VBOX_COMPARE_IEM_LAST
1019 /* IEM */
1020 pVCpu->fLocalForcedActions = (pVCpu->fLocalForcedActions & ~g_fInterestingFFs) | (g_fIncomingFFs & g_fInterestingFFs);
1021 *pCtx = g_IncomingCtx;
1022 VBOXSTRICTRC rcIem = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
1023 if (RT_UNLIKELY( rcIem == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1024 || rcIem == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1025 rcIem = VERR_EM_INTERPRETER;
1026 g_IemCtx = *pCtx;
1027 g_fIemFFs = pVCpu->fLocalForcedActions;
1028 rc = rcIem;
1029# endif
1030
1031# if defined(VBOX_COMPARE_IEM_LAST) || defined(VBOX_COMPARE_IEM_FIRST)
1032 emCompareWithIem(pVCpu, &g_EmCtx, &g_IemCtx, rcEm, rcIem, 0, 0);
1033# endif
1034
1035# else
1036 VBOXSTRICTRC rc = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
1037 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1038 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1039 rc = VERR_EM_INTERPRETER;
1040# endif
1041
1042 if (rc != VINF_SUCCESS)
1043 Log(("EMInterpretInstructionDisasState: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1044
1045 return rc;
1046#else
1047 uint32_t cbIgnored;
1048 VBOXSTRICTRC rc = emInterpretInstructionCPUOuter(pVCpu, pDis, pRegFrame, pvFault, enmCodeType, &cbIgnored);
1049 if (RT_SUCCESS(rc))
1050 pRegFrame->rip += pDis->cbInstr; /* Move on to the next instruction. */
1051 return rc;
1052#endif
1053}
1054
1055#ifdef IN_RC
1056
1057DECLINLINE(int) emRCStackRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCPTR GCPtrSrc, uint32_t cb)
1058{
1059 int rc = MMGCRamRead(pVM, pvDst, (void *)(uintptr_t)GCPtrSrc, cb);
1060 if (RT_LIKELY(rc != VERR_ACCESS_DENIED))
1061 return rc;
1062 return PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pvDst, GCPtrSrc, cb, /*fMayTrap*/ false);
1063}
1064
1065
1066/**
1067 * Interpret IRET (currently only to V86 code) - PATM only.
1068 *
1069 * @returns VBox status code.
1070 * @param pVM The cross context VM structure.
1071 * @param pVCpu The cross context virtual CPU structure.
1072 * @param pRegFrame The register frame.
1073 *
1074 */
1075VMM_INT_DECL(int) EMInterpretIretV86ForPatm(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1076{
1077 RTGCUINTPTR pIretStack = (RTGCUINTPTR)pRegFrame->esp;
1078 RTGCUINTPTR eip, cs, esp, ss, eflags, ds, es, fs, gs, uMask;
1079 int rc;
1080
1081 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1082 Assert(!CPUMIsGuestIn64BitCode(pVCpu));
1083 /** @todo Rainy day: Test what happens when VERR_EM_INTERPRETER is returned by
1084 * this function. Fear that it may guru on us, thus not converted to
1085 * IEM. */
1086
1087 rc = emRCStackRead(pVM, pVCpu, pRegFrame, &eip, (RTGCPTR)pIretStack , 4);
1088 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &cs, (RTGCPTR)(pIretStack + 4), 4);
1089 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &eflags, (RTGCPTR)(pIretStack + 8), 4);
1090 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1091 AssertReturn(eflags & X86_EFL_VM, VERR_EM_INTERPRETER);
1092
1093 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &esp, (RTGCPTR)(pIretStack + 12), 4);
1094 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ss, (RTGCPTR)(pIretStack + 16), 4);
1095 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &es, (RTGCPTR)(pIretStack + 20), 4);
1096 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ds, (RTGCPTR)(pIretStack + 24), 4);
1097 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &fs, (RTGCPTR)(pIretStack + 28), 4);
1098 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &gs, (RTGCPTR)(pIretStack + 32), 4);
1099 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1100
1101 pRegFrame->eip = eip & 0xffff;
1102 pRegFrame->cs.Sel = cs;
1103
1104 /* Mask away all reserved bits */
1105 uMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM | X86_EFL_AC | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_ID;
1106 eflags &= uMask;
1107
1108 CPUMRawSetEFlags(pVCpu, eflags);
1109 Assert((pRegFrame->eflags.u32 & (X86_EFL_IF|X86_EFL_IOPL)) == X86_EFL_IF);
1110
1111 pRegFrame->esp = esp;
1112 pRegFrame->ss.Sel = ss;
1113 pRegFrame->ds.Sel = ds;
1114 pRegFrame->es.Sel = es;
1115 pRegFrame->fs.Sel = fs;
1116 pRegFrame->gs.Sel = gs;
1117
1118 return VINF_SUCCESS;
1119}
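/*
 * For reference, the 32-bit IRET-to-V86 frame popped by the function above
 * (offsets relative to ESP, 4 bytes each):
 *   +0  EIP    +4  CS    +8  EFLAGS
 *   +12 ESP    +16 SS    +20 ES
 *   +24 DS     +28 FS    +32 GS
 */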
1120
1121/**
1122 * IRET Emulation.
1123 */
1124static int emInterpretIret(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
1125{
1126#ifdef VBOX_WITH_RAW_RING1
1127 NOREF(pvFault); NOREF(pcbSize);
1128 if (EMIsRawRing1Enabled(pVM))
1129 {
1130 RTGCUINTPTR pIretStack = (RTGCUINTPTR)pRegFrame->esp;
1131 RTGCUINTPTR eip, cs, esp, ss, eflags, uMask;
1132 int rc;
1133 uint32_t cpl, rpl;
1134
1135 /* We only execute 32-bits protected mode code in raw mode, so no need to bother to check for 16-bits code here. */
1136 /* @todo: we don't verify all the edge cases that generate #GP faults */
1137
1138 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1139 Assert(!CPUMIsGuestIn64BitCode(pVCpu));
1140 /** @todo Rainy day: Test what happens when VERR_EM_INTERPRETER is returned by
1141 * this function. Fear that it may guru on us, thus not converted to
1142 * IEM. */
1143
1144 rc = emRCStackRead(pVM, pVCpu, pRegFrame, &eip, (RTGCPTR)pIretStack , 4);
1145 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &cs, (RTGCPTR)(pIretStack + 4), 4);
1146 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &eflags, (RTGCPTR)(pIretStack + 8), 4);
1147 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1148 AssertReturn(eflags & X86_EFL_VM, VERR_EM_INTERPRETER);
1149
1150 /* Deal with V86 above. */
1151 if (eflags & X86_EFL_VM)
1152 return EMInterpretIretV86ForPatm(pVM, pVCpu, pRegFrame);
1153
1154 cpl = CPUMRCGetGuestCPL(pVCpu, pRegFrame);
1155 rpl = cs & X86_SEL_RPL;
1156
1157 Log(("emInterpretIret: iret to CS:EIP=%04X:%08X eflags=%x\n", cs, eip, eflags));
1158 if (rpl != cpl)
1159 {
1160 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &esp, (RTGCPTR)(pIretStack + 12), 4);
1161 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ss, (RTGCPTR)(pIretStack + 16), 4);
1162 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1163 Log(("emInterpretIret: return to different privilege level (rpl=%d cpl=%d)\n", rpl, cpl));
1164 Log(("emInterpretIret: SS:ESP=%04x:%08x\n", ss, esp));
1165 pRegFrame->ss.Sel = ss;
1166 pRegFrame->esp = esp;
1167 }
1168 pRegFrame->cs.Sel = cs;
1169 pRegFrame->eip = eip;
1170
1171 /* Adjust CS & SS as required. */
1172 CPUMRCRecheckRawState(pVCpu, pRegFrame);
1173
1174 /* Mask away all reserved bits */
1175 uMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM | X86_EFL_AC | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_ID;
1176 eflags &= uMask;
1177
1178 CPUMRawSetEFlags(pVCpu, eflags);
1179 Assert((pRegFrame->eflags.u32 & (X86_EFL_IF|X86_EFL_IOPL)) == X86_EFL_IF);
1180 return VINF_SUCCESS;
1181 }
1182#else
1183 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
1184#endif
1185 return VERR_EM_INTERPRETER;
1186}
1187
1188#endif /* IN_RC */
1189
1190
1191
1192/*
1193 *
1194 * Old interpreter primitives used by HM, move/eliminate later.
1195 * Old interpreter primitives used by HM, move/eliminate later.
1196 * Old interpreter primitives used by HM, move/eliminate later.
1197 * Old interpreter primitives used by HM, move/eliminate later.
1198 * Old interpreter primitives used by HM, move/eliminate later.
1199 *
1200 */
1201
1202
1203/**
1204 * Interpret CPUID given the parameters in the CPU context.
1205 *
1206 * @returns VBox status code.
1207 * @param pVM The cross context VM structure.
1208 * @param pVCpu The cross context virtual CPU structure.
1209 * @param pRegFrame The register frame.
1210 *
1211 */
1212VMM_INT_DECL(int) EMInterpretCpuId(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1213{
1214 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1215 uint32_t iLeaf = pRegFrame->eax;
1216 uint32_t iSubLeaf = pRegFrame->ecx;
1217 NOREF(pVM);
1218
1219 /* cpuid clears the high dwords of the affected 64 bits registers. */
1220 pRegFrame->rax = 0;
1221 pRegFrame->rbx = 0;
1222 pRegFrame->rcx = 0;
1223 pRegFrame->rdx = 0;
1224
1225 /* Note: operates the same in 64 and non-64 bits mode. */
1226 CPUMGetGuestCpuId(pVCpu, iLeaf, iSubLeaf, &pRegFrame->eax, &pRegFrame->ebx, &pRegFrame->ecx, &pRegFrame->edx);
1227 Log(("Emulate: CPUID %x -> %08x %08x %08x %08x\n", iLeaf, pRegFrame->eax, pRegFrame->ebx, pRegFrame->ecx, pRegFrame->edx));
1228 return VINF_SUCCESS;
1229}
1230
1231
1232/**
1233 * Interpret RDTSC.
1234 *
1235 * @returns VBox status code.
1236 * @param pVM The cross context VM structure.
1237 * @param pVCpu The cross context virtual CPU structure.
1238 * @param pRegFrame The register frame.
1239 *
1240 */
1241VMM_INT_DECL(int) EMInterpretRdtsc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1242{
1243 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1244 unsigned uCR4 = CPUMGetGuestCR4(pVCpu);
1245
1246 if (uCR4 & X86_CR4_TSD)
1247 return VERR_EM_INTERPRETER; /* genuine #GP */
1248
1249 uint64_t uTicks = TMCpuTickGet(pVCpu);
1250
1251 /* Same behaviour in 32 & 64 bits mode */
1252 pRegFrame->rax = (uint32_t)uTicks;
1253 pRegFrame->rdx = (uTicks >> 32ULL);
1254#ifdef VBOX_COMPARE_IEM_AND_EM
1255 g_fIgnoreRaxRdx = true;
1256#endif
1257
1258 NOREF(pVM);
1259 return VINF_SUCCESS;
1260}
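/*
 * Worked example for the EDX:EAX split above (sample value only):
 *   uTicks = 0x0000001234567890  ->  EAX = 0x34567890, EDX = 0x00000012.
 * Assigning 32-bit values to rax/rdx also clears their upper halves, which
 * matches what RDTSC does to RAX/RDX in 64-bit mode.
 */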
1261
1262/**
1263 * Interpret RDTSCP.
1264 *
1265 * @returns VBox status code.
1266 * @param pVM The cross context VM structure.
1267 * @param pVCpu The cross context virtual CPU structure.
1268 * @param pCtx The CPU context.
1269 *
1270 */
1271VMM_INT_DECL(int) EMInterpretRdtscp(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1272{
1273 Assert(pCtx == CPUMQueryGuestCtxPtr(pVCpu));
1274 uint32_t uCR4 = CPUMGetGuestCR4(pVCpu);
1275
1276 if (!pVM->cpum.ro.GuestFeatures.fRdTscP)
1277 {
1278 AssertFailed();
1279 return VERR_EM_INTERPRETER; /* genuine #UD */
1280 }
1281
1282 if (uCR4 & X86_CR4_TSD)
1283 return VERR_EM_INTERPRETER; /* genuine #GP */
1284
1285 uint64_t uTicks = TMCpuTickGet(pVCpu);
1286
1287 /* Same behaviour in 32 & 64 bits mode */
1288 pCtx->rax = (uint32_t)uTicks;
1289 pCtx->rdx = (uTicks >> 32ULL);
1290#ifdef VBOX_COMPARE_IEM_AND_EM
1291 g_fIgnoreRaxRdx = true;
1292#endif
1293 /* Low dword of the TSC_AUX msr only. */
1294 VBOXSTRICTRC rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pCtx->rcx); Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
1295 pCtx->rcx &= UINT32_C(0xffffffff);
1296
1297 return VINF_SUCCESS;
1298}
1299
1300/**
1301 * Interpret RDPMC.
1302 *
1303 * @returns VBox status code.
1304 * @param pVM The cross context VM structure.
1305 * @param pVCpu The cross context virtual CPU structure.
1306 * @param pRegFrame The register frame.
1307 *
1308 */
1309VMM_INT_DECL(int) EMInterpretRdpmc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1310{
1311 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1312 uint32_t uCR4 = CPUMGetGuestCR4(pVCpu);
1313
1314 /* If X86_CR4_PCE is not set, then CPL must be zero. */
1315 if ( !(uCR4 & X86_CR4_PCE)
1316 && CPUMGetGuestCPL(pVCpu) != 0)
1317 {
1318 Assert(CPUMGetGuestCR0(pVCpu) & X86_CR0_PE);
1319 return VERR_EM_INTERPRETER; /* genuine #GP */
1320 }
1321
1322 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
1323 pRegFrame->rax = 0;
1324 pRegFrame->rdx = 0;
1325 /** @todo We should trigger a \#GP here if the CPU doesn't support the index in
1326 * ecx but see @bugref{3472}! */
1327
1328 NOREF(pVM);
1329 return VINF_SUCCESS;
1330}
1331
1332
1333/**
1334 * MWAIT Emulation.
1335 */
1336VMM_INT_DECL(VBOXSTRICTRC) EMInterpretMWait(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1337{
1338 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1339 uint32_t u32Dummy, u32ExtFeatures, cpl, u32MWaitFeatures;
1340 NOREF(pVM);
1341
1342 /* Get the current privilege level. */
1343 cpl = CPUMGetGuestCPL(pVCpu);
1344 if (cpl != 0)
1345 return VERR_EM_INTERPRETER; /* supervisor only */
1346
1347 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Dummy);
1348 if (!(u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR))
1349 return VERR_EM_INTERPRETER; /* not supported */
1350
1351 /*
1352 * CPUID.05H.ECX[0] defines support for power management extensions (eax)
1353 * CPUID.05H.ECX[1] defines support for interrupts as break events for mwait even when IF=0
1354 */
1355 CPUMGetGuestCpuId(pVCpu, 5, 0, &u32Dummy, &u32Dummy, &u32MWaitFeatures, &u32Dummy);
1356 if (pRegFrame->ecx > 1)
1357 {
1358 Log(("EMInterpretMWait: unexpected ecx value %x -> recompiler\n", pRegFrame->ecx));
1359 return VERR_EM_INTERPRETER; /* illegal value. */
1360 }
1361
1362 if (pRegFrame->ecx && !(u32MWaitFeatures & X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
1363 {
1364 Log(("EMInterpretMWait: unsupported X86_CPUID_MWAIT_ECX_BREAKIRQIF0 -> recompiler\n"));
1365 return VERR_EM_INTERPRETER; /* illegal value. */
1366 }
1367
1368 return EMMonitorWaitPerform(pVCpu, pRegFrame->rax, pRegFrame->rcx);
1369}
1370
1371
1372/**
1373 * MONITOR Emulation.
1374 */
1375VMM_INT_DECL(int) EMInterpretMonitor(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1376{
1377 uint32_t u32Dummy, u32ExtFeatures, cpl;
1378 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1379 NOREF(pVM);
1380
1381 if (pRegFrame->ecx != 0)
1382 {
1383 Log(("emInterpretMonitor: unexpected ecx=%x -> recompiler!!\n", pRegFrame->ecx));
1384 return VERR_EM_INTERPRETER; /* illegal value. */
1385 }
1386
1387 /* Get the current privilege level. */
1388 cpl = CPUMGetGuestCPL(pVCpu);
1389 if (cpl != 0)
1390 return VERR_EM_INTERPRETER; /* supervisor only */
1391
1392 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Dummy);
1393 if (!(u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR))
1394 return VERR_EM_INTERPRETER; /* not supported */
1395
1396 EMMonitorWaitPrepare(pVCpu, pRegFrame->rax, pRegFrame->rcx, pRegFrame->rdx, NIL_RTGCPHYS);
1397 return VINF_SUCCESS;
1398}
1399
1400
1401/* VT-x only: */
1402
1403/**
1404 * Interpret INVLPG.
1405 *
1406 * @returns VBox status code.
1407 * @param pVM The cross context VM structure.
1408 * @param pVCpu The cross context virtual CPU structure.
1409 * @param pRegFrame The register frame.
1410 * @param pAddrGC Operand address.
1411 *
1412 */
1413VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pAddrGC)
1414{
1415 /** @todo is addr always a flat linear address or ds based
1416 * (in absence of segment override prefixes)????
1417 */
1418 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1419 NOREF(pVM); NOREF(pRegFrame);
1420#ifdef IN_RC
1421 LogFlow(("RC: EMULATE: invlpg %RGv\n", pAddrGC));
1422#endif
1423 VBOXSTRICTRC rc = PGMInvalidatePage(pVCpu, pAddrGC);
1424 if ( rc == VINF_SUCCESS
1425 || rc == VINF_PGM_SYNC_CR3 /* we can rely on the FF */)
1426 return VINF_SUCCESS;
1427 AssertMsgReturn(rc == VINF_EM_RAW_EMULATE_INSTR,
1428 ("%Rrc addr=%RGv\n", VBOXSTRICTRC_VAL(rc), pAddrGC),
1429 VERR_EM_INTERPRETER);
1430 return rc;
1431}
1432
1433
1434#ifdef LOG_ENABLED
1435static const char *emMSRtoString(uint32_t uMsr)
1436{
1437 switch (uMsr)
1438 {
1439 case MSR_IA32_APICBASE: return "MSR_IA32_APICBASE";
1440 case MSR_IA32_CR_PAT: return "MSR_IA32_CR_PAT";
1441 case MSR_IA32_SYSENTER_CS: return "MSR_IA32_SYSENTER_CS";
1442 case MSR_IA32_SYSENTER_EIP: return "MSR_IA32_SYSENTER_EIP";
1443 case MSR_IA32_SYSENTER_ESP: return "MSR_IA32_SYSENTER_ESP";
1444 case MSR_K6_EFER: return "MSR_K6_EFER";
1445 case MSR_K8_SF_MASK: return "MSR_K8_SF_MASK";
1446 case MSR_K6_STAR: return "MSR_K6_STAR";
1447 case MSR_K8_LSTAR: return "MSR_K8_LSTAR";
1448 case MSR_K8_CSTAR: return "MSR_K8_CSTAR";
1449 case MSR_K8_FS_BASE: return "MSR_K8_FS_BASE";
1450 case MSR_K8_GS_BASE: return "MSR_K8_GS_BASE";
1451 case MSR_K8_KERNEL_GS_BASE: return "MSR_K8_KERNEL_GS_BASE";
1452 case MSR_K8_TSC_AUX: return "MSR_K8_TSC_AUX";
1453 case MSR_IA32_BIOS_SIGN_ID: return "Unsupported MSR_IA32_BIOS_SIGN_ID";
1454 case MSR_IA32_PLATFORM_ID: return "Unsupported MSR_IA32_PLATFORM_ID";
1455 case MSR_IA32_BIOS_UPDT_TRIG: return "Unsupported MSR_IA32_BIOS_UPDT_TRIG";
1456 case MSR_IA32_TSC: return "MSR_IA32_TSC";
1457 case MSR_IA32_MISC_ENABLE: return "MSR_IA32_MISC_ENABLE";
1458 case MSR_IA32_MTRR_CAP: return "MSR_IA32_MTRR_CAP";
1459 case MSR_IA32_MCG_CAP: return "Unsupported MSR_IA32_MCG_CAP";
1460 case MSR_IA32_MCG_STATUS: return "Unsupported MSR_IA32_MCG_STATUS";
1461 case MSR_IA32_MCG_CTRL: return "Unsupported MSR_IA32_MCG_CTRL";
1462 case MSR_IA32_MTRR_DEF_TYPE: return "MSR_IA32_MTRR_DEF_TYPE";
1463 case MSR_K7_EVNTSEL0: return "Unsupported MSR_K7_EVNTSEL0";
1464 case MSR_K7_EVNTSEL1: return "Unsupported MSR_K7_EVNTSEL1";
1465 case MSR_K7_EVNTSEL2: return "Unsupported MSR_K7_EVNTSEL2";
1466 case MSR_K7_EVNTSEL3: return "Unsupported MSR_K7_EVNTSEL3";
1467 case MSR_IA32_MC0_CTL: return "Unsupported MSR_IA32_MC0_CTL";
1468 case MSR_IA32_MC0_STATUS: return "Unsupported MSR_IA32_MC0_STATUS";
1469 case MSR_IA32_PERFEVTSEL0: return "Unsupported MSR_IA32_PERFEVTSEL0";
1470 case MSR_IA32_PERFEVTSEL1: return "Unsupported MSR_IA32_PERFEVTSEL1";
1471 case MSR_IA32_PERF_STATUS: return "MSR_IA32_PERF_STATUS";
1472 case MSR_IA32_PLATFORM_INFO: return "MSR_IA32_PLATFORM_INFO";
1473 case MSR_IA32_PERF_CTL: return "Unsupported MSR_IA32_PERF_CTL";
1474 case MSR_K7_PERFCTR0: return "Unsupported MSR_K7_PERFCTR0";
1475 case MSR_K7_PERFCTR1: return "Unsupported MSR_K7_PERFCTR1";
1476 case MSR_K7_PERFCTR2: return "Unsupported MSR_K7_PERFCTR2";
1477 case MSR_K7_PERFCTR3: return "Unsupported MSR_K7_PERFCTR3";
1478 case MSR_IA32_PMC0: return "Unsupported MSR_IA32_PMC0";
1479 case MSR_IA32_PMC1: return "Unsupported MSR_IA32_PMC1";
1480 case MSR_IA32_PMC2: return "Unsupported MSR_IA32_PMC2";
1481 case MSR_IA32_PMC3: return "Unsupported MSR_IA32_PMC3";
1482 }
1483 return "Unknown MSR";
1484}
1485#endif /* LOG_ENABLED */
1486
1487
1488/**
1489 * Interpret RDMSR
1490 *
1491 * @returns VBox status code.
1492 * @param pVM The cross context VM structure.
1493 * @param pVCpu The cross context virtual CPU structure.
1494 * @param pRegFrame The register frame.
1495 */
1496VMM_INT_DECL(int) EMInterpretRdmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1497{
1498 NOREF(pVM);
1499
1500 /* Get the current privilege level. */
1501 if (CPUMGetGuestCPL(pVCpu) != 0)
1502 {
1503 Log4(("EM: Refuse RDMSR: CPL != 0\n"));
1504 return VERR_EM_INTERPRETER; /* supervisor only */
1505 }
1506
1507 uint64_t uValue;
1508 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pRegFrame->ecx, &uValue);
1509 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1510 {
1511 Log4(("EM: Refuse RDMSR: rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1512 Assert(rcStrict == VERR_CPUM_RAISE_GP_0 || rcStrict == VERR_EM_INTERPRETER || rcStrict == VINF_CPUM_R3_MSR_READ);
1513 return VERR_EM_INTERPRETER;
1514 }
1515 pRegFrame->rax = (uint32_t) uValue;
1516 pRegFrame->rdx = (uint32_t)(uValue >> 32);
1517 LogFlow(("EMInterpretRdmsr %s (%x) -> %RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx, uValue));
1518 return VINF_SUCCESS;
1519}
1520
1521
1522/**
1523 * Interpret WRMSR
1524 *
1525 * @returns VBox status code.
1526 * @param pVM The cross context VM structure.
1527 * @param pVCpu The cross context virtual CPU structure.
1528 * @param pRegFrame The register frame.
1529 */
1530VMM_INT_DECL(int) EMInterpretWrmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1531{
1532 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1533
1534 /* Check the current privilege level, this instruction is supervisor only. */
1535 if (CPUMGetGuestCPL(pVCpu) != 0)
1536 {
1537 Log4(("EM: Refuse WRMSR: CPL != 0\n"));
1538 return VERR_EM_INTERPRETER; /** @todo raise \#GP(0) */
1539 }
1540
1541 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pRegFrame->ecx, RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx));
1542 if (rcStrict != VINF_SUCCESS)
1543 {
1544 Log4(("EM: Refuse WRMSR: CPUMSetGuestMsr returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1545 Assert(rcStrict == VERR_CPUM_RAISE_GP_0 || rcStrict == VERR_EM_INTERPRETER || rcStrict == VINF_CPUM_R3_MSR_WRITE);
1546 return VERR_EM_INTERPRETER;
1547 }
1548 LogFlow(("EMInterpretWrmsr %s (%x) val=%RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx,
1549 RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx)));
1550 NOREF(pVM);
1551 return VINF_SUCCESS;
1552}
1553
1554
1555/**
1556 * Interpret DRx write.
1557 *
1558 * @returns VBox status code.
1559 * @param pVM The cross context VM structure.
1560 * @param pVCpu The cross context virtual CPU structure.
1561 * @param pRegFrame The register frame.
1562 * @param DestRegDrx DRx register index (USE_REG_DR*)
 1563 * @param   SrcRegGen   General purpose register index (USE_REG_E**)
1564 *
1565 */
1566VMM_INT_DECL(int) EMInterpretDRxWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegDrx, uint32_t SrcRegGen)
1567{
1568 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1569 uint64_t uNewDrX;
1570 int rc;
1571 NOREF(pVM);
1572
1573 if (CPUMIsGuestIn64BitCode(pVCpu))
1574 rc = DISFetchReg64(pRegFrame, SrcRegGen, &uNewDrX);
1575 else
1576 {
1577 uint32_t val32;
1578 rc = DISFetchReg32(pRegFrame, SrcRegGen, &val32);
1579 uNewDrX = val32;
1580 }
1581
1582 if (RT_SUCCESS(rc))
1583 {
1584 if (DestRegDrx == 6)
1585 {
1586 uNewDrX |= X86_DR6_RA1_MASK;
1587 uNewDrX &= ~X86_DR6_RAZ_MASK;
1588 }
1589 else if (DestRegDrx == 7)
1590 {
1591 uNewDrX |= X86_DR7_RA1_MASK;
1592 uNewDrX &= ~X86_DR7_RAZ_MASK;
1593 }
1594
1595 /** @todo we don't fail if illegal bits are set/cleared for e.g. dr7 */
1596 rc = CPUMSetGuestDRx(pVCpu, DestRegDrx, uNewDrX);
1597 if (RT_SUCCESS(rc))
1598 return rc;
1599 AssertMsgFailed(("CPUMSetGuestDRx %d failed\n", DestRegDrx));
1600 }
1601 return VERR_EM_INTERPRETER;
1602}
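/*
 * Editor's sketch (not part of the original file): the RA1/RAZ fixups above mean
 * the raw guest value never reaches CPUMSetGuestDRx for DR6/DR7. For example, a
 * guest write of 0 to DR6 is turned into the architectural DR6 init value:
 */
#if 0 /* illustrative only */
    uint64_t uNewDr6 = 0;                   /* guest writes 0 */
    uNewDr6 |= X86_DR6_RA1_MASK;            /* bits that always read as 1 */
    uNewDr6 &= ~X86_DR6_RAZ_MASK;           /* bits that always read as 0 */
    /* uNewDr6 is now 0xffff0ff0, which is what gets stored. */
#endif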
1603
1604
1605/**
1606 * Interpret DRx read.
1607 *
1608 * @returns VBox status code.
1609 * @param pVM The cross context VM structure.
1610 * @param pVCpu The cross context virtual CPU structure.
1611 * @param pRegFrame The register frame.
 1612 * @param   DestRegGen  General purpose register index (USE_REG_E**)
1613 * @param SrcRegDrx DRx register index (USE_REG_DR*)
1614 */
1615VMM_INT_DECL(int) EMInterpretDRxRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegGen, uint32_t SrcRegDrx)
1616{
1617 uint64_t val64;
1618 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1619 NOREF(pVM);
1620
1621 int rc = CPUMGetGuestDRx(pVCpu, SrcRegDrx, &val64);
1622 AssertMsgRCReturn(rc, ("CPUMGetGuestDRx %d failed\n", SrcRegDrx), VERR_EM_INTERPRETER);
1623 if (CPUMIsGuestIn64BitCode(pVCpu))
1624 rc = DISWriteReg64(pRegFrame, DestRegGen, val64);
1625 else
1626 rc = DISWriteReg32(pRegFrame, DestRegGen, (uint32_t)val64);
1627
1628 if (RT_SUCCESS(rc))
1629 return VINF_SUCCESS;
1630
1631 return VERR_EM_INTERPRETER;
1632}
1633
1634
1635#if !defined(VBOX_WITH_IEM) || defined(VBOX_COMPARE_IEM_AND_EM)
1636
1637
1638
1639
1640
1641
1642/*
1643 *
1644 * The old interpreter.
1645 * The old interpreter.
1646 * The old interpreter.
1647 * The old interpreter.
1648 * The old interpreter.
1649 *
1650 */
1651
1652DECLINLINE(int) emRamRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCPTR GCPtrSrc, uint32_t cb)
1653{
1654#ifdef IN_RC
1655 int rc = MMGCRamRead(pVM, pvDst, (void *)(uintptr_t)GCPtrSrc, cb);
1656 if (RT_LIKELY(rc != VERR_ACCESS_DENIED))
1657 return rc;
1658 /*
1659 * The page pool cache may end up here in some cases because it
1660 * flushed one of the shadow mappings used by the trapping
1661 * instruction and it either flushed the TLB or the CPU reused it.
1662 */
1663#else
1664 NOREF(pVM);
1665#endif
1666 return PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pvDst, GCPtrSrc, cb, /*fMayTrap*/ false);
1667}
1668
1669
1670DECLINLINE(int) emRamWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, uint32_t cb)
1671{
1672 /* Don't use MMGCRamWrite here as it does not respect zero pages, shared
1673 pages or write monitored pages. */
1674 NOREF(pVM);
1675#if !defined(VBOX_COMPARE_IEM_AND_EM) || !defined(VBOX_COMPARE_IEM_LAST)
1676 int rc = PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, /*fMayTrap*/ false);
1677#else
1678 int rc = VINF_SUCCESS;
1679#endif
1680#ifdef VBOX_COMPARE_IEM_AND_EM
1681 Log(("EM Wrote: %RGv %.*Rhxs rc=%Rrc\n", GCPtrDst, RT_MAX(RT_MIN(cb, 64), 1), pvSrc, rc));
1682 g_cbEmWrote = cb;
1683 memcpy(g_abEmWrote, pvSrc, RT_MIN(cb, sizeof(g_abEmWrote)));
1684#endif
1685 return rc;
1686}
1687
1688
1689/** Convert sel:addr to a flat GC address. */
1690DECLINLINE(RTGCPTR) emConvertToFlatAddr(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pDis, PDISOPPARAM pParam, RTGCPTR pvAddr)
1691{
1692 DISSELREG enmPrefixSeg = DISDetectSegReg(pDis, pParam);
1693 return SELMToFlat(pVM, enmPrefixSeg, pRegFrame, pvAddr);
1694}
1695
1696
1697#if defined(VBOX_STRICT) || defined(LOG_ENABLED)
1698/**
1699 * Get the mnemonic for the disassembled instruction.
1700 *
1701 * GC/R0 doesn't include the strings in the DIS tables because
1702 * of limited space.
1703 */
1704static const char *emGetMnemonic(PDISCPUSTATE pDis)
1705{
1706 switch (pDis->pCurInstr->uOpcode)
1707 {
1708 case OP_XCHG: return "Xchg";
1709 case OP_DEC: return "Dec";
1710 case OP_INC: return "Inc";
1711 case OP_POP: return "Pop";
1712 case OP_OR: return "Or";
1713 case OP_AND: return "And";
1714 case OP_MOV: return "Mov";
1715 case OP_INVLPG: return "InvlPg";
1716 case OP_CPUID: return "CpuId";
1717 case OP_MOV_CR: return "MovCRx";
1718 case OP_MOV_DR: return "MovDRx";
1719 case OP_LLDT: return "LLdt";
1720 case OP_LGDT: return "LGdt";
1721 case OP_LIDT: return "LIdt";
1722 case OP_CLTS: return "Clts";
1723 case OP_MONITOR: return "Monitor";
1724 case OP_MWAIT: return "MWait";
1725 case OP_RDMSR: return "Rdmsr";
1726 case OP_WRMSR: return "Wrmsr";
1727 case OP_ADD: return "Add";
1728 case OP_ADC: return "Adc";
1729 case OP_SUB: return "Sub";
1730 case OP_SBB: return "Sbb";
1731 case OP_RDTSC: return "Rdtsc";
1732 case OP_STI: return "Sti";
1733 case OP_CLI: return "Cli";
1734 case OP_XADD: return "XAdd";
1735 case OP_HLT: return "Hlt";
1736 case OP_IRET: return "Iret";
1737 case OP_MOVNTPS: return "MovNTPS";
1738 case OP_STOSWD: return "StosWD";
1739 case OP_WBINVD: return "WbInvd";
1740 case OP_XOR: return "Xor";
1741 case OP_BTR: return "Btr";
1742 case OP_BTS: return "Bts";
1743 case OP_BTC: return "Btc";
1744 case OP_LMSW: return "Lmsw";
1745 case OP_SMSW: return "Smsw";
1746 case OP_CMPXCHG: return pDis->fPrefix & DISPREFIX_LOCK ? "Lock CmpXchg" : "CmpXchg";
1747 case OP_CMPXCHG8B: return pDis->fPrefix & DISPREFIX_LOCK ? "Lock CmpXchg8b" : "CmpXchg8b";
1748
1749 default:
1750 Log(("Unknown opcode %d\n", pDis->pCurInstr->uOpcode));
1751 return "???";
1752 }
1753}
1754#endif /* VBOX_STRICT || LOG_ENABLED */
1755
1756
1757/**
1758 * XCHG instruction emulation.
1759 */
1760static int emInterpretXchg(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
1761{
1762 DISQPVPARAMVAL param1, param2;
1763 NOREF(pvFault);
1764
1765 /* Source to make DISQueryParamVal read the register value - ugly hack */
1766 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
1767 if(RT_FAILURE(rc))
1768 return VERR_EM_INTERPRETER;
1769
1770 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
1771 if(RT_FAILURE(rc))
1772 return VERR_EM_INTERPRETER;
1773
1774#ifdef IN_RC
1775 if (TRPMHasTrap(pVCpu))
1776 {
1777 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
1778 {
1779#endif
1780 RTGCPTR pParam1 = 0, pParam2 = 0;
1781 uint64_t valpar1, valpar2;
1782
1783 AssertReturn(pDis->Param1.cb == pDis->Param2.cb, VERR_EM_INTERPRETER);
1784 switch(param1.type)
1785 {
1786 case DISQPV_TYPE_IMMEDIATE: /* register type is translated to this one too */
1787 valpar1 = param1.val.val64;
1788 break;
1789
1790 case DISQPV_TYPE_ADDRESS:
1791 pParam1 = (RTGCPTR)param1.val.val64;
1792 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
1793 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
1794 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
1795 if (RT_FAILURE(rc))
1796 {
 1797                 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
1798 return VERR_EM_INTERPRETER;
1799 }
1800 break;
1801
1802 default:
1803 AssertFailed();
1804 return VERR_EM_INTERPRETER;
1805 }
1806
1807 switch(param2.type)
1808 {
1809 case DISQPV_TYPE_ADDRESS:
1810 pParam2 = (RTGCPTR)param2.val.val64;
1811 pParam2 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param2, pParam2);
1812 EM_ASSERT_FAULT_RETURN(pParam2 == pvFault, VERR_EM_INTERPRETER);
1813 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar2, pParam2, param2.size);
1814 if (RT_FAILURE(rc))
1815 {
 1816                 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam2, param2.size, rc));
                      return VERR_EM_INTERPRETER;
 1817             }
1818 break;
1819
1820 case DISQPV_TYPE_IMMEDIATE:
1821 valpar2 = param2.val.val64;
1822 break;
1823
1824 default:
1825 AssertFailed();
1826 return VERR_EM_INTERPRETER;
1827 }
1828
1829 /* Write value of parameter 2 to parameter 1 (reg or memory address) */
1830 if (pParam1 == 0)
1831 {
1832 Assert(param1.type == DISQPV_TYPE_IMMEDIATE); /* register actually */
1833 switch(param1.size)
1834 {
1835 case 1: //special case for AH etc
1836 rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg, (uint8_t )valpar2); break;
1837 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)valpar2); break;
1838 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)valpar2); break;
1839 case 8: rc = DISWriteReg64(pRegFrame, pDis->Param1.Base.idxGenReg, valpar2); break;
1840 default: AssertFailedReturn(VERR_EM_INTERPRETER);
1841 }
1842 if (RT_FAILURE(rc))
1843 return VERR_EM_INTERPRETER;
1844 }
1845 else
1846 {
1847 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar2, param1.size);
1848 if (RT_FAILURE(rc))
1849 {
1850 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
1851 return VERR_EM_INTERPRETER;
1852 }
1853 }
1854
1855 /* Write value of parameter 1 to parameter 2 (reg or memory address) */
1856 if (pParam2 == 0)
1857 {
1858 Assert(param2.type == DISQPV_TYPE_IMMEDIATE); /* register actually */
1859 switch(param2.size)
1860 {
1861 case 1: //special case for AH etc
1862 rc = DISWriteReg8(pRegFrame, pDis->Param2.Base.idxGenReg, (uint8_t )valpar1); break;
1863 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param2.Base.idxGenReg, (uint16_t)valpar1); break;
1864 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param2.Base.idxGenReg, (uint32_t)valpar1); break;
1865 case 8: rc = DISWriteReg64(pRegFrame, pDis->Param2.Base.idxGenReg, valpar1); break;
1866 default: AssertFailedReturn(VERR_EM_INTERPRETER);
1867 }
1868 if (RT_FAILURE(rc))
1869 return VERR_EM_INTERPRETER;
1870 }
1871 else
1872 {
1873 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam2, &valpar1, param2.size);
1874 if (RT_FAILURE(rc))
1875 {
 1876             AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam2, param2.size, rc));
1877 return VERR_EM_INTERPRETER;
1878 }
1879 }
1880
1881 *pcbSize = param2.size;
1882 return VINF_SUCCESS;
1883#ifdef IN_RC
1884 }
1885 }
1886 return VERR_EM_INTERPRETER;
1887#endif
1888}
1889
1890
1891/**
1892 * INC and DEC emulation.
1893 */
1894static int emInterpretIncDec(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
1895 PFNEMULATEPARAM2 pfnEmulate)
1896{
1897 DISQPVPARAMVAL param1;
1898 NOREF(pvFault);
1899
1900 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
1901 if(RT_FAILURE(rc))
1902 return VERR_EM_INTERPRETER;
1903
1904#ifdef IN_RC
1905 if (TRPMHasTrap(pVCpu))
1906 {
1907 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
1908 {
1909#endif
1910 RTGCPTR pParam1 = 0;
1911 uint64_t valpar1;
1912
1913 if (param1.type == DISQPV_TYPE_ADDRESS)
1914 {
1915 pParam1 = (RTGCPTR)param1.val.val64;
1916 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
1917#ifdef IN_RC
1918 /* Safety check (in theory it could cross a page boundary and fault there though) */
1919 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
1920#endif
1921 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
1922 if (RT_FAILURE(rc))
1923 {
1924 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
1925 return VERR_EM_INTERPRETER;
1926 }
1927 }
1928 else
1929 {
1930 AssertFailed();
1931 return VERR_EM_INTERPRETER;
1932 }
1933
1934 uint32_t eflags;
1935
1936 eflags = pfnEmulate(&valpar1, param1.size);
1937
1938 /* Write result back */
1939 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
1940 if (RT_FAILURE(rc))
1941 {
1942 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
1943 return VERR_EM_INTERPRETER;
1944 }
1945
1946 /* Update guest's eflags and finish. */
1947 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1948 | (eflags & (X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1949
1950 /* All done! */
1951 *pcbSize = param1.size;
1952 return VINF_SUCCESS;
1953#ifdef IN_RC
1954 }
1955 }
1956 return VERR_EM_INTERPRETER;
1957#endif
1958}
1959
1960
1961/**
1962 * POP Emulation.
1963 */
1964static int emInterpretPop(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
1965{
1966 Assert(pDis->uCpuMode != DISCPUMODE_64BIT); /** @todo check */
1967 DISQPVPARAMVAL param1;
1968 NOREF(pvFault);
1969
1970 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
1971 if(RT_FAILURE(rc))
1972 return VERR_EM_INTERPRETER;
1973
1974#ifdef IN_RC
1975 if (TRPMHasTrap(pVCpu))
1976 {
1977 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
1978 {
1979#endif
1980 RTGCPTR pParam1 = 0;
1981 uint32_t valpar1;
1982 RTGCPTR pStackVal;
1983
1984 /* Read stack value first */
1985 if (CPUMGetGuestCodeBits(pVCpu) == 16)
1986 return VERR_EM_INTERPRETER; /* No legacy 16 bits stuff here, please. */
1987
1988 /* Convert address; don't bother checking limits etc, as we only read here */
1989 pStackVal = SELMToFlat(pVM, DISSELREG_SS, pRegFrame, (RTGCPTR)pRegFrame->esp);
1990 if (pStackVal == 0)
1991 return VERR_EM_INTERPRETER;
1992
1993 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pStackVal, param1.size);
1994 if (RT_FAILURE(rc))
1995 {
 1996         AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pStackVal, param1.size, rc));
1997 return VERR_EM_INTERPRETER;
1998 }
1999
2000 if (param1.type == DISQPV_TYPE_ADDRESS)
2001 {
2002 pParam1 = (RTGCPTR)param1.val.val64;
2003
2004 /* pop [esp+xx] uses esp after the actual pop! */
2005 AssertCompile(DISGREG_ESP == DISGREG_SP);
2006 if ( (pDis->Param1.fUse & DISUSE_BASE)
2007 && (pDis->Param1.fUse & (DISUSE_REG_GEN16|DISUSE_REG_GEN32))
2008 && pDis->Param1.Base.idxGenReg == DISGREG_ESP
2009 )
2010 pParam1 = (RTGCPTR)((RTGCUINTPTR)pParam1 + param1.size);
2011
2012 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2013 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault || (RTGCPTR)pRegFrame->esp == pvFault, VERR_EM_INTERPRETER);
2014 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
2015 if (RT_FAILURE(rc))
2016 {
2017 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2018 return VERR_EM_INTERPRETER;
2019 }
2020
2021 /* Update ESP as the last step */
2022 pRegFrame->esp += param1.size;
2023 }
2024 else
2025 {
2026#ifndef DEBUG_bird // annoying assertion.
2027 AssertFailed();
2028#endif
2029 return VERR_EM_INTERPRETER;
2030 }
2031
2032 /* All done! */
2033 *pcbSize = param1.size;
2034 return VINF_SUCCESS;
2035#ifdef IN_RC
2036 }
2037 }
2038 return VERR_EM_INTERPRETER;
2039#endif
2040}
2041
2042
2043/**
2044 * XOR/OR/AND Emulation.
2045 */
2046static int emInterpretOrXorAnd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
2047 PFNEMULATEPARAM3 pfnEmulate)
2048{
2049 DISQPVPARAMVAL param1, param2;
2050 NOREF(pvFault);
2051
2052 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2053 if(RT_FAILURE(rc))
2054 return VERR_EM_INTERPRETER;
2055
2056 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2057 if(RT_FAILURE(rc))
2058 return VERR_EM_INTERPRETER;
2059
2060#ifdef IN_RC
2061 if (TRPMHasTrap(pVCpu))
2062 {
2063 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2064 {
2065#endif
2066 RTGCPTR pParam1;
2067 uint64_t valpar1, valpar2;
2068
2069 if (pDis->Param1.cb != pDis->Param2.cb)
2070 {
2071 if (pDis->Param1.cb < pDis->Param2.cb)
2072 {
2073 AssertMsgFailed(("%s at %RGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip, pDis->Param1.cb, pDis->Param2.cb)); /* should never happen! */
2074 return VERR_EM_INTERPRETER;
2075 }
2076 /* Or %Ev, Ib -> just a hack to save some space; the data width of the 1st parameter determines the real width */
2077 pDis->Param2.cb = pDis->Param1.cb;
2078 param2.size = param1.size;
2079 }
2080
2081 /* The destination is always a virtual address */
2082 if (param1.type == DISQPV_TYPE_ADDRESS)
2083 {
2084 pParam1 = (RTGCPTR)param1.val.val64;
2085 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2086 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
2087 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
2088 if (RT_FAILURE(rc))
2089 {
2090 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2091 return VERR_EM_INTERPRETER;
2092 }
2093 }
2094 else
2095 {
2096 AssertFailed();
2097 return VERR_EM_INTERPRETER;
2098 }
2099
2100 /* Register or immediate data */
2101 switch(param2.type)
2102 {
2103 case DISQPV_TYPE_IMMEDIATE: /* both immediate data and register (ugly) */
2104 valpar2 = param2.val.val64;
2105 break;
2106
2107 default:
2108 AssertFailed();
2109 return VERR_EM_INTERPRETER;
2110 }
2111
2112 LogFlow(("emInterpretOrXorAnd %s %RGv %RX64 - %RX64 size %d (%d)\n", emGetMnemonic(pDis), pParam1, valpar1, valpar2, param2.size, param1.size));
2113
2114 /* Data read, emulate instruction. */
2115 uint32_t eflags = pfnEmulate(&valpar1, valpar2, param2.size);
2116
2117 LogFlow(("emInterpretOrXorAnd %s result %RX64\n", emGetMnemonic(pDis), valpar1));
2118
2119 /* Update guest's eflags and finish. */
2120 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2121 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2122
2123 /* And write it back */
2124 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
2125 if (RT_SUCCESS(rc))
2126 {
2127 /* All done! */
2128 *pcbSize = param2.size;
2129 return VINF_SUCCESS;
2130 }
2131#ifdef IN_RC
2132 }
2133 }
2134#endif
2135 return VERR_EM_INTERPRETER;
2136}
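/*
 * Editor's sketch (not part of the original file): the EFLAGS update above, and
 * in the other arithmetic interpreters below, always follows the same pattern -
 * keep the guest's non-status flags and take the six status flags from the
 * assembly helper. A hypothetical helper expressing just that:
 */
#if 0 /* illustrative only */
DECLINLINE(void) emMergeStatusFlags(PCPUMCTXCORE pRegFrame, uint32_t fEFlagsNew)
{
    uint32_t const fStatus = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF;
    pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~fStatus) | (fEFlagsNew & fStatus);
}
#endif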
2137
2138
2139#ifndef VBOX_COMPARE_IEM_AND_EM
2140/**
2141 * LOCK XOR/OR/AND Emulation.
2142 */
2143static int emInterpretLockOrXorAnd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
2144 uint32_t *pcbSize, PFNEMULATELOCKPARAM3 pfnEmulate)
2145{
2146 void *pvParam1;
2147 DISQPVPARAMVAL param1, param2;
2148 NOREF(pvFault);
2149
2150#if HC_ARCH_BITS == 32
2151 Assert(pDis->Param1.cb <= 4);
2152#endif
2153
2154 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2155 if(RT_FAILURE(rc))
2156 return VERR_EM_INTERPRETER;
2157
2158 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2159 if(RT_FAILURE(rc))
2160 return VERR_EM_INTERPRETER;
2161
2162 if (pDis->Param1.cb != pDis->Param2.cb)
2163 {
2164 AssertMsgReturn(pDis->Param1.cb >= pDis->Param2.cb, /* should never happen! */
2165 ("%s at %RGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip, pDis->Param1.cb, pDis->Param2.cb),
2166 VERR_EM_INTERPRETER);
2167
2168 /* Or %Ev, Ib -> just a hack to save some space; the data width of the 1st parameter determines the real width */
2169 pDis->Param2.cb = pDis->Param1.cb;
2170 param2.size = param1.size;
2171 }
2172
2173#ifdef IN_RC
2174 /* Safety check (in theory it could cross a page boundary and fault there though) */
2175 Assert( TRPMHasTrap(pVCpu)
2176 && (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW));
2177 EM_ASSERT_FAULT_RETURN(GCPtrPar1 == pvFault, VERR_EM_INTERPRETER);
2178#endif
2179
2180 /* Register and immediate data == DISQPV_TYPE_IMMEDIATE */
2181 AssertReturn(param2.type == DISQPV_TYPE_IMMEDIATE, VERR_EM_INTERPRETER);
2182 RTGCUINTREG ValPar2 = param2.val.val64;
2183
2184 /* The destination is always a virtual address */
2185 AssertReturn(param1.type == DISQPV_TYPE_ADDRESS, VERR_EM_INTERPRETER);
2186
2187 RTGCPTR GCPtrPar1 = param1.val.val64;
2188 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
2189 PGMPAGEMAPLOCK Lock;
2190 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
2191 AssertRCReturn(rc, VERR_EM_INTERPRETER);
2192
 2193     /* Try to emulate it with a one-shot #PF handler in place. (RC) */
2194 Log2(("%s %RGv imm%d=%RX64\n", emGetMnemonic(pDis), GCPtrPar1, pDis->Param2.cb*8, ValPar2));
2195
2196 RTGCUINTREG32 eflags = 0;
2197 rc = pfnEmulate(pvParam1, ValPar2, pDis->Param2.cb, &eflags);
2198 PGMPhysReleasePageMappingLock(pVM, &Lock);
2199 if (RT_FAILURE(rc))
2200 {
2201 Log(("%s %RGv imm%d=%RX64-> emulation failed due to page fault!\n", emGetMnemonic(pDis), GCPtrPar1, pDis->Param2.cb*8, ValPar2));
2202 return VERR_EM_INTERPRETER;
2203 }
2204
2205 /* Update guest's eflags and finish. */
2206 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2207 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2208
2209 *pcbSize = param2.size;
2210 return VINF_SUCCESS;
2211}
2212#endif /* !VBOX_COMPARE_IEM_AND_EM */
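/*
 * Editor's sketch (not part of the original file): the locked variants map the
 * guest linear address to a host pointer so the assembly helper can operate on
 * the real page atomically. The recurring pattern (also used by CMPXCHG and
 * XADD below), with the same local names as above, is:
 */
#if 0 /* illustrative only */
    PGMPAGEMAPLOCK Lock;
    void          *pvParam1;
    rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
    AssertRCReturn(rc, VERR_EM_INTERPRETER);
    /* ... let the LOCK-prefixed helper work directly on *pvParam1 ... */
    PGMPhysReleasePageMappingLock(pVM, &Lock);
#endif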
2213
2214
2215/**
2216 * ADD, ADC & SUB Emulation.
2217 */
2218static int emInterpretAddSub(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
2219 PFNEMULATEPARAM3 pfnEmulate)
2220{
2221 NOREF(pvFault);
2222 DISQPVPARAMVAL param1, param2;
2223 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2224 if(RT_FAILURE(rc))
2225 return VERR_EM_INTERPRETER;
2226
2227 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2228 if(RT_FAILURE(rc))
2229 return VERR_EM_INTERPRETER;
2230
2231#ifdef IN_RC
2232 if (TRPMHasTrap(pVCpu))
2233 {
2234 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2235 {
2236#endif
2237 RTGCPTR pParam1;
2238 uint64_t valpar1, valpar2;
2239
2240 if (pDis->Param1.cb != pDis->Param2.cb)
2241 {
2242 if (pDis->Param1.cb < pDis->Param2.cb)
2243 {
2244 AssertMsgFailed(("%s at %RGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip, pDis->Param1.cb, pDis->Param2.cb)); /* should never happen! */
2245 return VERR_EM_INTERPRETER;
2246 }
2247 /* Or %Ev, Ib -> just a hack to save some space; the data width of the 1st parameter determines the real width */
2248 pDis->Param2.cb = pDis->Param1.cb;
2249 param2.size = param1.size;
2250 }
2251
2252 /* The destination is always a virtual address */
2253 if (param1.type == DISQPV_TYPE_ADDRESS)
2254 {
2255 pParam1 = (RTGCPTR)param1.val.val64;
2256 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2257 EM_ASSERT_FAULT_RETURN(pParam1 == pvFault, VERR_EM_INTERPRETER);
2258 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, param1.size);
2259 if (RT_FAILURE(rc))
2260 {
2261 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2262 return VERR_EM_INTERPRETER;
2263 }
2264 }
2265 else
2266 {
2267#ifndef DEBUG_bird
2268 AssertFailed();
2269#endif
2270 return VERR_EM_INTERPRETER;
2271 }
2272
2273 /* Register or immediate data */
2274 switch(param2.type)
2275 {
2276 case DISQPV_TYPE_IMMEDIATE: /* both immediate data and register (ugly) */
2277 valpar2 = param2.val.val64;
2278 break;
2279
2280 default:
2281 AssertFailed();
2282 return VERR_EM_INTERPRETER;
2283 }
2284
2285 /* Data read, emulate instruction. */
2286 uint32_t eflags = pfnEmulate(&valpar1, valpar2, param2.size);
2287
2288 /* Update guest's eflags and finish. */
2289 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2290 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2291
2292 /* And write it back */
2293 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, param1.size);
2294 if (RT_SUCCESS(rc))
2295 {
2296 /* All done! */
2297 *pcbSize = param2.size;
2298 return VINF_SUCCESS;
2299 }
2300#ifdef IN_RC
2301 }
2302 }
2303#endif
2304 return VERR_EM_INTERPRETER;
2305}
2306
2307
2308/**
2309 * ADC Emulation.
2310 */
2311static int emInterpretAdc(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2312{
2313 if (pRegFrame->eflags.Bits.u1CF)
2314 return emInterpretAddSub(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, EMEmulateAdcWithCarrySet);
2315 else
2316 return emInterpretAddSub(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, EMEmulateAdd);
2317}
2318
2319
2320/**
2321 * BTR/C/S Emulation.
2322 */
2323static int emInterpretBitTest(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize,
2324 PFNEMULATEPARAM2UINT32 pfnEmulate)
2325{
2326 DISQPVPARAMVAL param1, param2;
2327 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2328 if(RT_FAILURE(rc))
2329 return VERR_EM_INTERPRETER;
2330
2331 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2332 if(RT_FAILURE(rc))
2333 return VERR_EM_INTERPRETER;
2334
2335#ifdef IN_RC
2336 if (TRPMHasTrap(pVCpu))
2337 {
2338 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2339 {
2340#endif
2341 RTGCPTR pParam1;
2342 uint64_t valpar1 = 0, valpar2;
2343 uint32_t eflags;
2344
2345 /* The destination is always a virtual address */
2346 if (param1.type != DISQPV_TYPE_ADDRESS)
2347 return VERR_EM_INTERPRETER;
2348
2349 pParam1 = (RTGCPTR)param1.val.val64;
2350 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
2351
2352 /* Register or immediate data */
2353 switch(param2.type)
2354 {
2355 case DISQPV_TYPE_IMMEDIATE: /* both immediate data and register (ugly) */
2356 valpar2 = param2.val.val64;
2357 break;
2358
2359 default:
2360 AssertFailed();
2361 return VERR_EM_INTERPRETER;
2362 }
2363
2364 Log2(("emInterpret%s: pvFault=%RGv pParam1=%RGv val2=%x\n", emGetMnemonic(pDis), pvFault, pParam1, valpar2));
2365 pParam1 = (RTGCPTR)((RTGCUINTPTR)pParam1 + valpar2/8);
2366 EM_ASSERT_FAULT_RETURN((RTGCPTR)((RTGCUINTPTR)pParam1 & ~3) == pvFault, VERR_EM_INTERPRETER); NOREF(pvFault);
2367 rc = emRamRead(pVM, pVCpu, pRegFrame, &valpar1, pParam1, 1);
2368 if (RT_FAILURE(rc))
2369 {
2370 AssertMsgFailed(("emRamRead %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
2371 return VERR_EM_INTERPRETER;
2372 }
2373
2374 Log2(("emInterpretBtx: val=%x\n", valpar1));
2375 /* Data read, emulate bit test instruction. */
2376 eflags = pfnEmulate(&valpar1, valpar2 & 0x7);
2377
2378 Log2(("emInterpretBtx: val=%x CF=%d\n", valpar1, !!(eflags & X86_EFL_CF)));
2379
2380 /* Update guest's eflags and finish. */
2381 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2382 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2383
2384 /* And write it back */
2385 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &valpar1, 1);
2386 if (RT_SUCCESS(rc))
2387 {
2388 /* All done! */
2389 *pcbSize = 1;
2390 return VINF_SUCCESS;
2391 }
2392#ifdef IN_RC
2393 }
2394 }
2395#endif
2396 return VERR_EM_INTERPRETER;
2397}
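/*
 * Editor's sketch (not part of the original file): BT/BTR/BTS/BTC with a memory
 * operand address the byte containing the bit, which is why the code above
 * rebases the pointer and keeps only the bit index within that byte. For
 * example, for "bts [mem], 19" (GCPtrMem is a hypothetical flat address):
 */
#if 0 /* illustrative only */
    uint64_t uBitOffset = 19;
    RTGCPTR  GCPtrByte  = GCPtrMem + uBitOffset / 8;    /* byte #2 of the operand */
    unsigned iBitInByte = uBitOffset & 7;               /* bit 3 within that byte */
    /* emRamRead/emRamWrite then only touch that single byte. */
#endif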
2398
2399
2400#ifndef VBOX_COMPARE_IEM_AND_EM
2401/**
2402 * LOCK BTR/C/S Emulation.
2403 */
2404static int emInterpretLockBitTest(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
2405 uint32_t *pcbSize, PFNEMULATELOCKPARAM2 pfnEmulate)
2406{
2407 void *pvParam1;
2408
2409 DISQPVPARAMVAL param1, param2;
2410 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2411 if(RT_FAILURE(rc))
2412 return VERR_EM_INTERPRETER;
2413
2414 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2415 if(RT_FAILURE(rc))
2416 return VERR_EM_INTERPRETER;
2417
2418 /* The destination is always a virtual address */
2419 if (param1.type != DISQPV_TYPE_ADDRESS)
2420 return VERR_EM_INTERPRETER;
2421
2422 /* Register and immediate data == DISQPV_TYPE_IMMEDIATE */
2423 AssertReturn(param2.type == DISQPV_TYPE_IMMEDIATE, VERR_EM_INTERPRETER);
2424 uint64_t ValPar2 = param2.val.val64;
2425
2426 /* Adjust the parameters so what we're dealing with is a bit within the byte pointed to. */
2427 RTGCPTR GCPtrPar1 = param1.val.val64;
2428 GCPtrPar1 = (GCPtrPar1 + ValPar2 / 8);
2429 ValPar2 &= 7;
2430
2431 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
2432#ifdef IN_RC
2433 Assert(TRPMHasTrap(pVCpu));
2434 EM_ASSERT_FAULT_RETURN((RTGCPTR)((RTGCUINTPTR)GCPtrPar1 & ~(RTGCUINTPTR)3) == pvFault, VERR_EM_INTERPRETER);
2435#endif
2436
2437 PGMPAGEMAPLOCK Lock;
2438 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
2439 AssertRCReturn(rc, VERR_EM_INTERPRETER);
2440
2441 Log2(("emInterpretLockBitTest %s: pvFault=%RGv GCPtrPar1=%RGv imm=%RX64\n", emGetMnemonic(pDis), pvFault, GCPtrPar1, ValPar2));
2442 NOREF(pvFault);
2443
 2444     /* Try to emulate it with a one-shot #PF handler in place. (RC) */
2445 RTGCUINTREG32 eflags = 0;
2446 rc = pfnEmulate(pvParam1, ValPar2, &eflags);
2447 PGMPhysReleasePageMappingLock(pVM, &Lock);
2448 if (RT_FAILURE(rc))
2449 {
2450 Log(("emInterpretLockBitTest %s: %RGv imm%d=%RX64 -> emulation failed due to page fault!\n",
2451 emGetMnemonic(pDis), GCPtrPar1, pDis->Param2.cb*8, ValPar2));
2452 return VERR_EM_INTERPRETER;
2453 }
2454
2455 Log2(("emInterpretLockBitTest %s: GCPtrPar1=%RGv imm=%RX64 CF=%d\n", emGetMnemonic(pDis), GCPtrPar1, ValPar2, !!(eflags & X86_EFL_CF)));
2456
2457 /* Update guest's eflags and finish. */
2458 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2459 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2460
2461 *pcbSize = 1;
2462 return VINF_SUCCESS;
2463}
2464#endif /* !VBOX_COMPARE_IEM_AND_EM */
2465
2466
2467/**
2468 * MOV emulation.
2469 */
2470static int emInterpretMov(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2471{
2472 NOREF(pvFault);
2473 DISQPVPARAMVAL param1, param2;
2474 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_DST);
2475 if(RT_FAILURE(rc))
2476 return VERR_EM_INTERPRETER;
2477
2478 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2479 if(RT_FAILURE(rc))
2480 return VERR_EM_INTERPRETER;
2481
2482 /* If destination is a segment register, punt. We can't handle it here.
2483 * NB: Source can be a register and still trigger a #PF!
2484 */
2485 if (RT_UNLIKELY(pDis->Param1.fUse == DISUSE_REG_SEG))
2486 return VERR_EM_INTERPRETER;
2487
2488 if (param1.type == DISQPV_TYPE_ADDRESS)
2489 {
2490 RTGCPTR pDest;
2491 uint64_t val64;
2492
2493 switch(param1.type)
2494 {
2495 case DISQPV_TYPE_IMMEDIATE:
2496 if(!(param1.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
2497 return VERR_EM_INTERPRETER;
2498 /* fallthru */
2499
2500 case DISQPV_TYPE_ADDRESS:
2501 pDest = (RTGCPTR)param1.val.val64;
2502 pDest = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pDest);
2503 break;
2504
2505 default:
2506 AssertFailed();
2507 return VERR_EM_INTERPRETER;
2508 }
2509
2510 switch(param2.type)
2511 {
2512 case DISQPV_TYPE_IMMEDIATE: /* register type is translated to this one too */
2513 val64 = param2.val.val64;
2514 break;
2515
2516 default:
2517 Log(("emInterpretMov: unexpected type=%d rip=%RGv\n", param2.type, (RTGCPTR)pRegFrame->rip));
2518 return VERR_EM_INTERPRETER;
2519 }
2520#ifdef LOG_ENABLED
2521 if (pDis->uCpuMode == DISCPUMODE_64BIT)
2522 LogFlow(("EMInterpretInstruction at %RGv: OP_MOV %RGv <- %RX64 (%d) &val64=%RHv\n", (RTGCPTR)pRegFrame->rip, pDest, val64, param2.size, &val64));
2523 else
2524 LogFlow(("EMInterpretInstruction at %08RX64: OP_MOV %RGv <- %08X (%d) &val64=%RHv\n", pRegFrame->rip, pDest, (uint32_t)val64, param2.size, &val64));
2525#endif
2526
2527 Assert(param2.size <= 8 && param2.size > 0);
2528 EM_ASSERT_FAULT_RETURN(pDest == pvFault, VERR_EM_INTERPRETER);
2529 rc = emRamWrite(pVM, pVCpu, pRegFrame, pDest, &val64, param2.size);
2530 if (RT_FAILURE(rc))
2531 return VERR_EM_INTERPRETER;
2532
2533 *pcbSize = param2.size;
2534 }
2535#if defined(IN_RC) && defined(VBOX_WITH_RAW_RING1)
 2536     /* The 'mov xx, cs' instruction is dangerous in raw mode and is replaced by an 'int3' by csam/patm. */
2537 else if ( param1.type == DISQPV_TYPE_REGISTER
2538 && param2.type == DISQPV_TYPE_REGISTER)
2539 {
2540 AssertReturn((pDis->Param1.fUse & (DISUSE_REG_GEN8|DISUSE_REG_GEN16|DISUSE_REG_GEN32)), VERR_EM_INTERPRETER);
2541 AssertReturn(pDis->Param2.fUse == DISUSE_REG_SEG, VERR_EM_INTERPRETER);
2542 AssertReturn(pDis->Param2.Base.idxSegReg == DISSELREG_CS, VERR_EM_INTERPRETER);
2543
2544 uint32_t u32Cpl = CPUMRCGetGuestCPL(pVCpu, pRegFrame);
2545 uint32_t uValCS = (pRegFrame->cs.Sel & ~X86_SEL_RPL) | u32Cpl;
2546
2547 Log(("EMInterpretInstruction: OP_MOV cs=%x->%x\n", pRegFrame->cs.Sel, uValCS));
2548 switch (param1.size)
2549 {
2550 case 1: rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg, (uint8_t) uValCS); break;
2551 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)uValCS); break;
2552 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)uValCS); break;
2553 default:
2554 AssertFailed();
2555 return VERR_EM_INTERPRETER;
2556 }
2557 AssertRCReturn(rc, rc);
2558 }
2559#endif
2560 else
2561 { /* read fault */
2562 RTGCPTR pSrc;
2563 uint64_t val64;
2564
2565 /* Source */
2566 switch(param2.type)
2567 {
2568 case DISQPV_TYPE_IMMEDIATE:
2569 if(!(param2.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
2570 return VERR_EM_INTERPRETER;
2571 /* fallthru */
2572
2573 case DISQPV_TYPE_ADDRESS:
2574 pSrc = (RTGCPTR)param2.val.val64;
2575 pSrc = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param2, pSrc);
2576 break;
2577
2578 default:
2579 return VERR_EM_INTERPRETER;
2580 }
2581
2582 Assert(param1.size <= 8 && param1.size > 0);
2583 EM_ASSERT_FAULT_RETURN(pSrc == pvFault, VERR_EM_INTERPRETER);
2584 rc = emRamRead(pVM, pVCpu, pRegFrame, &val64, pSrc, param1.size);
2585 if (RT_FAILURE(rc))
2586 return VERR_EM_INTERPRETER;
2587
2588 /* Destination */
2589 switch(param1.type)
2590 {
2591 case DISQPV_TYPE_REGISTER:
2592 switch(param1.size)
2593 {
2594 case 1: rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg, (uint8_t) val64); break;
2595 case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)val64); break;
2596 case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)val64); break;
2597 case 8: rc = DISWriteReg64(pRegFrame, pDis->Param1.Base.idxGenReg, val64); break;
2598 default:
2599 return VERR_EM_INTERPRETER;
2600 }
2601 if (RT_FAILURE(rc))
2602 return rc;
2603 break;
2604
2605 default:
2606 return VERR_EM_INTERPRETER;
2607 }
2608#ifdef LOG_ENABLED
2609 if (pDis->uCpuMode == DISCPUMODE_64BIT)
2610 LogFlow(("EMInterpretInstruction: OP_MOV %RGv -> %RX64 (%d)\n", pSrc, val64, param1.size));
2611 else
2612 LogFlow(("EMInterpretInstruction: OP_MOV %RGv -> %08X (%d)\n", pSrc, (uint32_t)val64, param1.size));
2613#endif
2614 }
2615 return VINF_SUCCESS;
2616}
2617
2618
2619#ifndef IN_RC
2620/**
2621 * [REP] STOSWD emulation
2622 */
2623static int emInterpretStosWD(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2624{
2625 int rc;
2626 RTGCPTR GCDest, GCOffset;
2627 uint32_t cbSize;
2628 uint64_t cTransfers;
2629 int offIncrement;
2630 NOREF(pvFault);
2631
2632 /* Don't support any but these three prefix bytes. */
2633 if ((pDis->fPrefix & ~(DISPREFIX_ADDRSIZE|DISPREFIX_OPSIZE|DISPREFIX_REP|DISPREFIX_REX)))
2634 return VERR_EM_INTERPRETER;
2635
2636 switch (pDis->uAddrMode)
2637 {
2638 case DISCPUMODE_16BIT:
2639 GCOffset = pRegFrame->di;
2640 cTransfers = pRegFrame->cx;
2641 break;
2642 case DISCPUMODE_32BIT:
2643 GCOffset = pRegFrame->edi;
2644 cTransfers = pRegFrame->ecx;
2645 break;
2646 case DISCPUMODE_64BIT:
2647 GCOffset = pRegFrame->rdi;
2648 cTransfers = pRegFrame->rcx;
2649 break;
2650 default:
2651 AssertFailed();
2652 return VERR_EM_INTERPRETER;
2653 }
2654
2655 GCDest = SELMToFlat(pVM, DISSELREG_ES, pRegFrame, GCOffset);
2656 switch (pDis->uOpMode)
2657 {
2658 case DISCPUMODE_16BIT:
2659 cbSize = 2;
2660 break;
2661 case DISCPUMODE_32BIT:
2662 cbSize = 4;
2663 break;
2664 case DISCPUMODE_64BIT:
2665 cbSize = 8;
2666 break;
2667 default:
2668 AssertFailed();
2669 return VERR_EM_INTERPRETER;
2670 }
2671
2672 offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cbSize : (signed)cbSize;
2673
2674 if (!(pDis->fPrefix & DISPREFIX_REP))
2675 {
2676 LogFlow(("emInterpretStosWD dest=%04X:%RGv (%RGv) cbSize=%d\n", pRegFrame->es.Sel, GCOffset, GCDest, cbSize));
2677
2678 rc = emRamWrite(pVM, pVCpu, pRegFrame, GCDest, &pRegFrame->rax, cbSize);
2679 if (RT_FAILURE(rc))
2680 return VERR_EM_INTERPRETER;
2681 Assert(rc == VINF_SUCCESS);
2682
2683 /* Update (e/r)di. */
2684 switch (pDis->uAddrMode)
2685 {
2686 case DISCPUMODE_16BIT:
2687 pRegFrame->di += offIncrement;
2688 break;
2689 case DISCPUMODE_32BIT:
2690 pRegFrame->edi += offIncrement;
2691 break;
2692 case DISCPUMODE_64BIT:
2693 pRegFrame->rdi += offIncrement;
2694 break;
2695 default:
2696 AssertFailed();
2697 return VERR_EM_INTERPRETER;
2698 }
2699
2700 }
2701 else
2702 {
2703 if (!cTransfers)
2704 return VINF_SUCCESS;
2705
2706 /*
 2707          * Do *not* try to emulate cross page stuff here because we don't know what might
 2708          * be waiting for us on the subsequent pages. The caller has only asked us to
 2709          * ignore access handlers for the current page.
2710 * This also fends off big stores which would quickly kill PGMR0DynMap.
2711 */
2712 if ( cbSize > PAGE_SIZE
2713 || cTransfers > PAGE_SIZE
2714 || (GCDest >> PAGE_SHIFT) != ((GCDest + offIncrement * cTransfers) >> PAGE_SHIFT))
2715 {
 2716             Log(("STOSWD crosses pages, chicken out to the recompiler; GCDest=%RGv cbSize=%#x offIncrement=%d cTransfers=%#x\n",
2717 GCDest, cbSize, offIncrement, cTransfers));
2718 return VERR_EM_INTERPRETER;
2719 }
2720
2721 LogFlow(("emInterpretStosWD dest=%04X:%RGv (%RGv) cbSize=%d cTransfers=%x DF=%d\n", pRegFrame->es.Sel, GCOffset, GCDest, cbSize, cTransfers, pRegFrame->eflags.Bits.u1DF));
2722 /* Access verification first; we currently can't recover properly from traps inside this instruction */
2723 rc = PGMVerifyAccess(pVCpu, GCDest - ((offIncrement > 0) ? 0 : ((cTransfers-1) * cbSize)),
2724 cTransfers * cbSize,
2725 X86_PTE_RW | (CPUMGetGuestCPL(pVCpu) == 3 ? X86_PTE_US : 0));
2726 if (rc != VINF_SUCCESS)
2727 {
2728 Log(("STOSWD will generate a trap -> recompiler, rc=%d\n", rc));
2729 return VERR_EM_INTERPRETER;
2730 }
2731
2732 /* REP case */
2733 while (cTransfers)
2734 {
2735 rc = emRamWrite(pVM, pVCpu, pRegFrame, GCDest, &pRegFrame->rax, cbSize);
2736 if (RT_FAILURE(rc))
2737 {
2738 rc = VERR_EM_INTERPRETER;
2739 break;
2740 }
2741
2742 Assert(rc == VINF_SUCCESS);
2743 GCOffset += offIncrement;
2744 GCDest += offIncrement;
2745 cTransfers--;
2746 }
2747
2748 /* Update the registers. */
2749 switch (pDis->uAddrMode)
2750 {
2751 case DISCPUMODE_16BIT:
2752 pRegFrame->di = GCOffset;
2753 pRegFrame->cx = cTransfers;
2754 break;
2755 case DISCPUMODE_32BIT:
2756 pRegFrame->edi = GCOffset;
2757 pRegFrame->ecx = cTransfers;
2758 break;
2759 case DISCPUMODE_64BIT:
2760 pRegFrame->rdi = GCOffset;
2761 pRegFrame->rcx = cTransfers;
2762 break;
2763 default:
2764 AssertFailed();
2765 return VERR_EM_INTERPRETER;
2766 }
2767 }
2768
2769 *pcbSize = cbSize;
2770 return rc;
2771}
2772#endif /* !IN_RC */
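/*
 * Editor's worked example (not part of the original file): in the REP case the
 * verified range must start at the lowest address that will be written. With
 * DF=1 the stores go downwards, e.g. "std; mov ecx,4; rep stosd" with a flat ES
 * and edi=0x100c (hypothetical values):
 */
#if 0 /* illustrative only */
    /* cbSize = 4, cTransfers = 4, offIncrement = -4                          */
    /* addresses written: 0x100c, 0x1008, 0x1004, 0x1000                      */
    RTGCPTR GCFirst = GCDest - (cTransfers - 1) * cbSize;   /* = 0x1000       */
    /* PGMVerifyAccess then checks cTransfers * cbSize = 16 bytes from there. */
#endif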
2773
2774
2775/**
2776 * [LOCK] CMPXCHG emulation.
2777 */
2778static int emInterpretCmpXchg(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2779{
2780 DISQPVPARAMVAL param1, param2;
2781 NOREF(pvFault);
2782
2783#if HC_ARCH_BITS == 32
2784 Assert(pDis->Param1.cb <= 4);
2785#endif
2786
2787 /* Source to make DISQueryParamVal read the register value - ugly hack */
2788 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
2789 if(RT_FAILURE(rc))
2790 return VERR_EM_INTERPRETER;
2791
2792 rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param2, &param2, DISQPVWHICH_SRC);
2793 if(RT_FAILURE(rc))
2794 return VERR_EM_INTERPRETER;
2795
2796 uint64_t valpar;
2797 switch(param2.type)
2798 {
2799 case DISQPV_TYPE_IMMEDIATE: /* register actually */
2800 valpar = param2.val.val64;
2801 break;
2802
2803 default:
2804 return VERR_EM_INTERPRETER;
2805 }
2806
2807 PGMPAGEMAPLOCK Lock;
2808 RTGCPTR GCPtrPar1;
2809 void *pvParam1;
2810 uint64_t eflags;
2811
2812 AssertReturn(pDis->Param1.cb == pDis->Param2.cb, VERR_EM_INTERPRETER);
2813 switch(param1.type)
2814 {
2815 case DISQPV_TYPE_ADDRESS:
2816 GCPtrPar1 = param1.val.val64;
2817 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
2818
2819 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
2820 AssertRCReturn(rc, VERR_EM_INTERPRETER);
2821 break;
2822
2823 default:
2824 return VERR_EM_INTERPRETER;
2825 }
2826
2827 LogFlow(("%s %RGv rax=%RX64 %RX64\n", emGetMnemonic(pDis), GCPtrPar1, pRegFrame->rax, valpar));
2828
2829#ifndef VBOX_COMPARE_IEM_AND_EM
2830 if (pDis->fPrefix & DISPREFIX_LOCK)
2831 eflags = EMEmulateLockCmpXchg(pvParam1, &pRegFrame->rax, valpar, pDis->Param2.cb);
2832 else
2833 eflags = EMEmulateCmpXchg(pvParam1, &pRegFrame->rax, valpar, pDis->Param2.cb);
2834#else /* VBOX_COMPARE_IEM_AND_EM */
2835 uint64_t u64;
2836 switch (pDis->Param2.cb)
2837 {
2838 case 1: u64 = *(uint8_t *)pvParam1; break;
2839 case 2: u64 = *(uint16_t *)pvParam1; break;
2840 case 4: u64 = *(uint32_t *)pvParam1; break;
2841 default:
2842 case 8: u64 = *(uint64_t *)pvParam1; break;
2843 }
2844 eflags = EMEmulateCmpXchg(&u64, &pRegFrame->rax, valpar, pDis->Param2.cb);
2845 int rc2 = emRamWrite(pVM, pVCpu, pRegFrame, GCPtrPar1, &u64, pDis->Param2.cb); AssertRCSuccess(rc2);
2846#endif /* VBOX_COMPARE_IEM_AND_EM */
2847
2848 LogFlow(("%s %RGv rax=%RX64 %RX64 ZF=%d\n", emGetMnemonic(pDis), GCPtrPar1, pRegFrame->rax, valpar, !!(eflags & X86_EFL_ZF)));
2849
2850 /* Update guest's eflags and finish. */
2851 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2852 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2853
2854 *pcbSize = param2.size;
2855 PGMPhysReleasePageMappingLock(pVM, &Lock);
2856 return VINF_SUCCESS;
2857}
2858
2859
2860/**
2861 * [LOCK] CMPXCHG8B emulation.
2862 */
2863static int emInterpretCmpXchg8b(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2864{
2865 DISQPVPARAMVAL param1;
2866 NOREF(pvFault);
2867
2868 /* Source to make DISQueryParamVal read the register value - ugly hack */
2869 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
2870 if(RT_FAILURE(rc))
2871 return VERR_EM_INTERPRETER;
2872
2873 RTGCPTR GCPtrPar1;
2874 void *pvParam1;
2875 uint64_t eflags;
2876 PGMPAGEMAPLOCK Lock;
2877
2878 AssertReturn(pDis->Param1.cb == 8, VERR_EM_INTERPRETER);
2879 switch(param1.type)
2880 {
2881 case DISQPV_TYPE_ADDRESS:
2882 GCPtrPar1 = param1.val.val64;
2883 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, GCPtrPar1);
2884
2885 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
2886 AssertRCReturn(rc, VERR_EM_INTERPRETER);
2887 break;
2888
2889 default:
2890 return VERR_EM_INTERPRETER;
2891 }
2892
2893 LogFlow(("%s %RGv=%p eax=%08x\n", emGetMnemonic(pDis), GCPtrPar1, pvParam1, pRegFrame->eax));
2894
2895#ifndef VBOX_COMPARE_IEM_AND_EM
2896 if (pDis->fPrefix & DISPREFIX_LOCK)
2897 eflags = EMEmulateLockCmpXchg8b(pvParam1, &pRegFrame->eax, &pRegFrame->edx, pRegFrame->ebx, pRegFrame->ecx);
2898 else
2899 eflags = EMEmulateCmpXchg8b(pvParam1, &pRegFrame->eax, &pRegFrame->edx, pRegFrame->ebx, pRegFrame->ecx);
2900#else /* VBOX_COMPARE_IEM_AND_EM */
2901 uint64_t u64 = *(uint64_t *)pvParam1;
2902 eflags = EMEmulateCmpXchg8b(&u64, &pRegFrame->eax, &pRegFrame->edx, pRegFrame->ebx, pRegFrame->ecx);
2903 int rc2 = emRamWrite(pVM, pVCpu, pRegFrame, GCPtrPar1, &u64, sizeof(u64)); AssertRCSuccess(rc2);
2904#endif /* VBOX_COMPARE_IEM_AND_EM */
2905
2906 LogFlow(("%s %RGv=%p eax=%08x ZF=%d\n", emGetMnemonic(pDis), GCPtrPar1, pvParam1, pRegFrame->eax, !!(eflags & X86_EFL_ZF)));
2907
2908 /* Update guest's eflags and finish; note that *only* ZF is affected. */
2909 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_ZF))
2910 | (eflags & (X86_EFL_ZF));
2911
2912 *pcbSize = 8;
2913 PGMPhysReleasePageMappingLock(pVM, &Lock);
2914 return VINF_SUCCESS;
2915}
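/*
 * Editor's sketch (not part of the original file): EMEmulateCmpXchg8b implements
 * the architectural CMPXCHG8B semantics, roughly equivalent to the following C;
 * only ZF is defined by the instruction, which is why only ZF is merged above.
 */
#if 0 /* illustrative only */
    uint64_t const uOld = *(uint64_t volatile *)pvParam1;
    if (uOld == RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx))
    {
        *(uint64_t volatile *)pvParam1 = RT_MAKE_U64(pRegFrame->ebx, pRegFrame->ecx);
        /* ZF = 1 */
    }
    else
    {
        pRegFrame->eax = (uint32_t)uOld;
        pRegFrame->edx = (uint32_t)(uOld >> 32);
        /* ZF = 0 */
    }
#endif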
2916
2917
2918#ifdef IN_RC /** @todo test+enable for HM as well. */
2919/**
2920 * [LOCK] XADD emulation.
2921 */
2922static int emInterpretXAdd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
2923{
2924 Assert(pDis->uCpuMode != DISCPUMODE_64BIT); /** @todo check */
2925 DISQPVPARAMVAL param1;
2926 void *pvParamReg2;
2927 size_t cbParamReg2;
2928 NOREF(pvFault);
2929
2930 /* Source to make DISQueryParamVal read the register value - ugly hack */
2931 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
2932 if(RT_FAILURE(rc))
2933 return VERR_EM_INTERPRETER;
2934
2935 rc = DISQueryParamRegPtr(pRegFrame, pDis, &pDis->Param2, &pvParamReg2, &cbParamReg2);
2936 Assert(cbParamReg2 <= 4);
2937 if(RT_FAILURE(rc))
2938 return VERR_EM_INTERPRETER;
2939
2940#ifdef IN_RC
2941 if (TRPMHasTrap(pVCpu))
2942 {
2943 if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
2944 {
2945#endif
2946 RTGCPTR GCPtrPar1;
2947 void *pvParam1;
2948 uint32_t eflags;
2949 PGMPAGEMAPLOCK Lock;
2950
2951 AssertReturn(pDis->Param1.cb == pDis->Param2.cb, VERR_EM_INTERPRETER);
2952 switch(param1.type)
2953 {
2954 case DISQPV_TYPE_ADDRESS:
2955 GCPtrPar1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, (RTRCUINTPTR)param1.val.val64);
2956#ifdef IN_RC
2957 EM_ASSERT_FAULT_RETURN(GCPtrPar1 == pvFault, VERR_EM_INTERPRETER);
2958#endif
2959
2960 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrPar1, &pvParam1, &Lock);
2961 AssertRCReturn(rc, VERR_EM_INTERPRETER);
2962 break;
2963
2964 default:
2965 return VERR_EM_INTERPRETER;
2966 }
2967
2968 LogFlow(("XAdd %RGv=%p reg=%08llx\n", GCPtrPar1, pvParam1, *(uint64_t *)pvParamReg2));
2969
2970#ifndef VBOX_COMPARE_IEM_AND_EM
2971 if (pDis->fPrefix & DISPREFIX_LOCK)
2972 eflags = EMEmulateLockXAdd(pvParam1, pvParamReg2, cbParamReg2);
2973 else
2974 eflags = EMEmulateXAdd(pvParam1, pvParamReg2, cbParamReg2);
2975#else /* VBOX_COMPARE_IEM_AND_EM */
2976 uint64_t u64;
2977 switch (cbParamReg2)
2978 {
2979 case 1: u64 = *(uint8_t *)pvParam1; break;
2980 case 2: u64 = *(uint16_t *)pvParam1; break;
2981 case 4: u64 = *(uint32_t *)pvParam1; break;
2982 default:
2983 case 8: u64 = *(uint64_t *)pvParam1; break;
2984 }
2985 eflags = EMEmulateXAdd(&u64, pvParamReg2, cbParamReg2);
2986 int rc2 = emRamWrite(pVM, pVCpu, pRegFrame, GCPtrPar1, &u64, pDis->Param2.cb); AssertRCSuccess(rc2);
2987#endif /* VBOX_COMPARE_IEM_AND_EM */
2988
2989 LogFlow(("XAdd %RGv=%p reg=%08llx ZF=%d\n", GCPtrPar1, pvParam1, *(uint64_t *)pvParamReg2, !!(eflags & X86_EFL_ZF) ));
2990
2991 /* Update guest's eflags and finish. */
2992 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
2993 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
2994
2995 *pcbSize = cbParamReg2;
2996 PGMPhysReleasePageMappingLock(pVM, &Lock);
2997 return VINF_SUCCESS;
2998#ifdef IN_RC
2999 }
3000 }
3001
3002 return VERR_EM_INTERPRETER;
3003#endif
3004}
3005#endif /* IN_RC */
3006
3007
3008/**
3009 * WBINVD Emulation.
3010 */
3011static int emInterpretWbInvd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3012{
3013 /* Nothing to do. */
3014 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3015 return VINF_SUCCESS;
3016}
3017
3018
3019/**
3020 * INVLPG Emulation.
3021 */
3022static VBOXSTRICTRC emInterpretInvlPg(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3023{
3024 DISQPVPARAMVAL param1;
3025 RTGCPTR addr;
3026 NOREF(pvFault); NOREF(pVM); NOREF(pcbSize);
3027
3028 VBOXSTRICTRC rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3029 if(RT_FAILURE(rc))
3030 return VERR_EM_INTERPRETER;
3031
3032 switch(param1.type)
3033 {
3034 case DISQPV_TYPE_IMMEDIATE:
3035 case DISQPV_TYPE_ADDRESS:
3036 if(!(param1.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
3037 return VERR_EM_INTERPRETER;
3038 addr = (RTGCPTR)param1.val.val64;
3039 break;
3040
3041 default:
3042 return VERR_EM_INTERPRETER;
3043 }
3044
 3045     /** @todo Is addr always a flat linear address, or DS-based
 3046      *        (in the absence of segment override prefixes)?
3047 */
3048#ifdef IN_RC
3049 LogFlow(("RC: EMULATE: invlpg %RGv\n", addr));
3050#endif
3051 rc = PGMInvalidatePage(pVCpu, addr);
3052 if ( rc == VINF_SUCCESS
3053 || rc == VINF_PGM_SYNC_CR3 /* we can rely on the FF */)
3054 return VINF_SUCCESS;
3055 AssertMsgReturn(rc == VINF_EM_RAW_EMULATE_INSTR,
3056 ("%Rrc addr=%RGv\n", VBOXSTRICTRC_VAL(rc), addr),
3057 VERR_EM_INTERPRETER);
3058 return rc;
3059}
3060
3061/** @todo change all these EMInterpretXXX methods to VBOXSTRICTRC. */
3062
3063/**
3064 * CPUID Emulation.
3065 */
3066static int emInterpretCpuId(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3067{
3068 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3069 int rc = EMInterpretCpuId(pVM, pVCpu, pRegFrame);
3070 return rc;
3071}
3072
3073
3074/**
3075 * CLTS Emulation.
3076 */
3077static int emInterpretClts(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3078{
3079 NOREF(pVM); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3080
3081 uint64_t cr0 = CPUMGetGuestCR0(pVCpu);
3082 if (!(cr0 & X86_CR0_TS))
3083 return VINF_SUCCESS;
3084 return CPUMSetGuestCR0(pVCpu, cr0 & ~X86_CR0_TS);
3085}
3086
3087
3088/**
3089 * Update CRx.
3090 *
3091 * @returns VBox status code.
3092 * @param pVM The cross context VM structure.
3093 * @param pVCpu The cross context virtual CPU structure.
3094 * @param pRegFrame The register frame.
3095 * @param DestRegCrx CRx register index (DISUSE_REG_CR*)
3096 * @param val New CRx value
3097 *
3098 */
3099static int emUpdateCRx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegCrx, uint64_t val)
3100{
3101 uint64_t oldval;
3102 uint64_t msrEFER;
3103 uint32_t fValid;
3104 int rc, rc2;
3105 NOREF(pVM);
3106
3107 /** @todo Clean up this mess. */
3108 LogFlow(("emInterpretCRxWrite at %RGv CR%d <- %RX64\n", (RTGCPTR)pRegFrame->rip, DestRegCrx, val));
3109 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3110 switch (DestRegCrx)
3111 {
3112 case DISCREG_CR0:
3113 oldval = CPUMGetGuestCR0(pVCpu);
3114#ifdef IN_RC
3115 /* CR0.WP and CR0.AM changes require a reschedule run in ring 3. */
3116 if ( (val & (X86_CR0_WP | X86_CR0_AM))
3117 != (oldval & (X86_CR0_WP | X86_CR0_AM)))
3118 return VERR_EM_INTERPRETER;
3119#endif
3120 rc = VINF_SUCCESS;
3121#if !defined(VBOX_COMPARE_IEM_AND_EM) || !defined(VBOX_COMPARE_IEM_LAST)
3122 CPUMSetGuestCR0(pVCpu, val);
3123#else
3124 CPUMQueryGuestCtxPtr(pVCpu)->cr0 = val | X86_CR0_ET;
3125#endif
3126 val = CPUMGetGuestCR0(pVCpu);
3127 if ( (oldval & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
3128 != (val & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
3129 {
3130 /* global flush */
3131 rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /* global */);
3132 AssertRCReturn(rc, rc);
3133 }
3134
3135 /* Deal with long mode enabling/disabling. */
3136 msrEFER = CPUMGetGuestEFER(pVCpu);
3137 if (msrEFER & MSR_K6_EFER_LME)
3138 {
3139 if ( !(oldval & X86_CR0_PG)
3140 && (val & X86_CR0_PG))
3141 {
 3142                 /* Illegal to have an active 64-bit CS selector (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
3143 if (pRegFrame->cs.Attr.n.u1Long)
3144 {
3145 AssertMsgFailed(("Illegal enabling of paging with CS.u1Long = 1!!\n"));
3146 return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
3147 }
3148
3149 /* Illegal to switch to long mode before activating PAE first (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
3150 if (!(CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE))
3151 {
3152 AssertMsgFailed(("Illegal enabling of paging with PAE disabled!!\n"));
3153 return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
3154 }
3155 msrEFER |= MSR_K6_EFER_LMA;
3156 }
3157 else
3158 if ( (oldval & X86_CR0_PG)
3159 && !(val & X86_CR0_PG))
3160 {
3161 msrEFER &= ~MSR_K6_EFER_LMA;
3162 /** @todo Do we need to cut off rip here? High dword of rip is undefined, so it shouldn't really matter. */
3163 }
3164 CPUMSetGuestEFER(pVCpu, msrEFER);
3165 }
3166 rc2 = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
3167 return rc2 == VINF_SUCCESS ? rc : rc2;
3168
3169 case DISCREG_CR2:
3170 rc = CPUMSetGuestCR2(pVCpu, val); AssertRC(rc);
3171 return VINF_SUCCESS;
3172
3173 case DISCREG_CR3:
3174 /* Reloading the current CR3 means the guest just wants to flush the TLBs */
3175 rc = CPUMSetGuestCR3(pVCpu, val); AssertRC(rc);
3176 if (CPUMGetGuestCR0(pVCpu) & X86_CR0_PG)
3177 {
3178 /* flush */
3179 rc = PGMFlushTLB(pVCpu, val, !(CPUMGetGuestCR4(pVCpu) & X86_CR4_PGE));
3180 AssertRC(rc);
3181 }
3182 return rc;
3183
3184 case DISCREG_CR4:
3185 oldval = CPUMGetGuestCR4(pVCpu);
3186 rc = CPUMSetGuestCR4(pVCpu, val); AssertRC(rc);
3187 val = CPUMGetGuestCR4(pVCpu);
3188
3189 /* Illegal to disable PAE when long mode is active. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
3190 msrEFER = CPUMGetGuestEFER(pVCpu);
3191 if ( (msrEFER & MSR_K6_EFER_LMA)
3192 && (oldval & X86_CR4_PAE)
3193 && !(val & X86_CR4_PAE))
3194 {
3195 return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
3196 }
3197
3198 /* From IEM iemCImpl_load_CrX. */
3199 /** @todo Check guest CPUID bits for determining corresponding valid bits. */
3200 fValid = X86_CR4_VME | X86_CR4_PVI
3201 | X86_CR4_TSD | X86_CR4_DE
3202 | X86_CR4_PSE | X86_CR4_PAE
3203 | X86_CR4_MCE | X86_CR4_PGE
3204 | X86_CR4_PCE | X86_CR4_OSFXSR
3205 | X86_CR4_OSXMMEEXCPT;
3206 //if (xxx)
3207 // fValid |= X86_CR4_VMXE;
3208 //if (xxx)
3209 // fValid |= X86_CR4_OSXSAVE;
3210 if (val & ~(uint64_t)fValid)
3211 {
3212 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", val, val & ~(uint64_t)fValid));
3213 return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
3214 }
3215
3216 rc = VINF_SUCCESS;
3217 if ( (oldval & (X86_CR4_PGE|X86_CR4_PAE|X86_CR4_PSE))
3218 != (val & (X86_CR4_PGE|X86_CR4_PAE|X86_CR4_PSE)))
3219 {
3220 /* global flush */
3221 rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /* global */);
3222 AssertRCReturn(rc, rc);
3223 }
3224
3225 /* Feeling extremely lazy. */
3226# ifdef IN_RC
3227 if ( (oldval & (X86_CR4_OSFXSR|X86_CR4_OSXMMEEXCPT|X86_CR4_PCE|X86_CR4_MCE|X86_CR4_PAE|X86_CR4_DE|X86_CR4_TSD|X86_CR4_PVI|X86_CR4_VME))
3228 != (val & (X86_CR4_OSFXSR|X86_CR4_OSXMMEEXCPT|X86_CR4_PCE|X86_CR4_MCE|X86_CR4_PAE|X86_CR4_DE|X86_CR4_TSD|X86_CR4_PVI|X86_CR4_VME)))
3229 {
3230 Log(("emInterpretMovCRx: CR4: %#RX64->%#RX64 => R3\n", oldval, val));
3231 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
3232 }
3233# endif
3234# ifdef VBOX_WITH_RAW_MODE
3235 if (((val ^ oldval) & X86_CR4_VME) && !HMIsEnabled(pVM))
3236 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
3237# endif
3238
3239 rc2 = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
3240 return rc2 == VINF_SUCCESS ? rc : rc2;
3241
3242 case DISCREG_CR8:
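/* Example: MOV CR8, 5 becomes PDMApicSetTPR(pVCpu, 0x50), i.e. TPR priority class 5. */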
3243 return PDMApicSetTPR(pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
3244
3245 default:
3246 AssertFailed();
3247 case DISCREG_CR1: /* illegal op */
3248 break;
3249 }
3250 return VERR_EM_INTERPRETER;
3251}
3252
3253
3254/**
3255 * LMSW Emulation.
3256 */
3257static int emInterpretLmsw(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3258{
3259 DISQPVPARAMVAL param1;
3260 uint32_t val;
3261 NOREF(pvFault); NOREF(pcbSize);
3262 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3263
3264 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3265 if(RT_FAILURE(rc))
3266 return VERR_EM_INTERPRETER;
3267
3268 switch(param1.type)
3269 {
3270 case DISQPV_TYPE_IMMEDIATE:
3271 case DISQPV_TYPE_ADDRESS:
3272 if(!(param1.flags & DISQPV_FLAG_16))
3273 return VERR_EM_INTERPRETER;
3274 val = param1.val.val32;
3275 break;
3276
3277 default:
3278 return VERR_EM_INTERPRETER;
3279 }
3280
3281 LogFlow(("emInterpretLmsw %x\n", val));
3282 uint64_t OldCr0 = CPUMGetGuestCR0(pVCpu);
3283
3284 /* Only PE, MP, EM and TS can be changed; note that PE can't be cleared by this instruction. */
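/* Example: with OldCr0 = PE|MP and an LMSW operand of TS, the result is PE|TS: MP is
   cleared, TS is set, and PE is preserved because it is not part of the clear mask. */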
3285 uint64_t NewCr0 = ( OldCr0 & ~( X86_CR0_MP | X86_CR0_EM | X86_CR0_TS))
3286 | (val & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS));
3287
3288 return emUpdateCRx(pVM, pVCpu, pRegFrame, DISCREG_CR0, NewCr0);
3289
3290}
3291
3292#ifdef EM_EMULATE_SMSW
3293/**
3294 * SMSW Emulation.
3295 */
3296static int emInterpretSmsw(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3297{
3298 NOREF(pvFault); NOREF(pcbSize);
3299 DISQPVPARAMVAL param1;
3300 uint64_t cr0 = CPUMGetGuestCR0(pVCpu);
3301
3302 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3303 if(RT_FAILURE(rc))
3304 return VERR_EM_INTERPRETER;
3305
3306 switch(param1.type)
3307 {
3308 case DISQPV_TYPE_IMMEDIATE:
3309 if(param1.size != sizeof(uint16_t))
3310 return VERR_EM_INTERPRETER;
3311 LogFlow(("emInterpretSmsw %d <- cr0 (%RX64)\n", pDis->Param1.Base.idxGenReg, cr0));
3312 rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, cr0);
3313 break;
3314
3315 case DISQPV_TYPE_ADDRESS:
3316 {
3317 RTGCPTR pParam1;
3318
3319 /* Actually forced to 16 bits regardless of the operand size. */
3320 if(param1.size != sizeof(uint16_t))
3321 return VERR_EM_INTERPRETER;
3322
3323 pParam1 = (RTGCPTR)param1.val.val64;
3324 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pParam1);
3325 LogFlow(("emInterpretSmsw %RGv <- cr0 (%RX64)\n", pParam1, cr0));
3326
3327 rc = emRamWrite(pVM, pVCpu, pRegFrame, pParam1, &cr0, sizeof(uint16_t));
3328 if (RT_FAILURE(rc))
3329 {
3330 AssertMsgFailed(("emRamWrite %RGv size=%d failed with %Rrc\n", pParam1, param1.size, rc));
3331 return VERR_EM_INTERPRETER;
3332 }
3333 break;
3334 }
3335
3336 default:
3337 return VERR_EM_INTERPRETER;
3338 }
3339
3340 LogFlow(("emInterpretSmsw %RX64\n", cr0));
3341 return rc;
3342}
3343#endif
3344
3345
3346/**
3347 * Interpret CRx read.
3348 *
3349 * @returns VBox status code.
3350 * @param pVM The cross context VM structure.
3351 * @param pVCpu The cross context virtual CPU structure.
3352 * @param pRegFrame The register frame.
3353 * @param DestRegGen General purpose register index (USE_REG_E**)
3354 * @param SrcRegCrx CRx register index (DISUSE_REG_CR*)
3355 *
3356 */
3357static int emInterpretCRxRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegGen, uint32_t SrcRegCrx)
3358{
3359 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3360 uint64_t val64;
3361 int rc = CPUMGetGuestCRx(pVCpu, SrcRegCrx, &val64);
3362 AssertMsgRCReturn(rc, ("CPUMGetGuestCRx %d failed\n", SrcRegCrx), VERR_EM_INTERPRETER);
3363 NOREF(pVM);
3364
3365 if (CPUMIsGuestIn64BitCode(pVCpu))
3366 rc = DISWriteReg64(pRegFrame, DestRegGen, val64);
3367 else
3368 rc = DISWriteReg32(pRegFrame, DestRegGen, val64);
3369
3370 if (RT_SUCCESS(rc))
3371 {
3372 LogFlow(("MOV_CR: gen32=%d CR=%d val=%RX64\n", DestRegGen, SrcRegCrx, val64));
3373 return VINF_SUCCESS;
3374 }
3375 return VERR_EM_INTERPRETER;
3376}
3377
3378
3379/**
3380 * Interpret CRx write.
3381 *
3382 * @returns VBox status code.
3383 * @param pVM The cross context VM structure.
3384 * @param pVCpu The cross context virtual CPU structure.
3385 * @param pRegFrame The register frame.
3386 * @param DestRegCrx CRx register index (DISUSE_REG_CR*)
3387 * @param SrcRegGen General purpose register index (USE_REG_E**)
3388 *
3389 */
3390static int emInterpretCRxWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegCrx, uint32_t SrcRegGen)
3391{
3392 uint64_t val;
3393 int rc;
3394 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3395
3396 if (CPUMIsGuestIn64BitCode(pVCpu))
3397 rc = DISFetchReg64(pRegFrame, SrcRegGen, &val);
3398 else
3399 {
3400 uint32_t val32;
3401 rc = DISFetchReg32(pRegFrame, SrcRegGen, &val32);
3402 val = val32;
3403 }
3404
3405 if (RT_SUCCESS(rc))
3406 return emUpdateCRx(pVM, pVCpu, pRegFrame, DestRegCrx, val);
3407
3408 return VERR_EM_INTERPRETER;
3409}
3410
3411
3412/**
3413 * MOV CRx
3414 */
3415static int emInterpretMovCRx(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3416{
3417 NOREF(pvFault); NOREF(pcbSize);
3418 if ((pDis->Param1.fUse == DISUSE_REG_GEN32 || pDis->Param1.fUse == DISUSE_REG_GEN64) && pDis->Param2.fUse == DISUSE_REG_CR)
3419 return emInterpretCRxRead(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxGenReg, pDis->Param2.Base.idxCtrlReg);
3420
3421 if (pDis->Param1.fUse == DISUSE_REG_CR && (pDis->Param2.fUse == DISUSE_REG_GEN32 || pDis->Param2.fUse == DISUSE_REG_GEN64))
3422 return emInterpretCRxWrite(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxCtrlReg, pDis->Param2.Base.idxGenReg);
3423
3424 AssertMsgFailedReturn(("Unexpected control register move\n"), VERR_EM_INTERPRETER);
3425}
3426
3427
3428/**
3429 * MOV DRx
3430 */
3431static int emInterpretMovDRx(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3432{
3433 int rc = VERR_EM_INTERPRETER;
3434 NOREF(pvFault); NOREF(pcbSize);
3435
3436 if((pDis->Param1.fUse == DISUSE_REG_GEN32 || pDis->Param1.fUse == DISUSE_REG_GEN64) && pDis->Param2.fUse == DISUSE_REG_DBG)
3437 {
3438 rc = EMInterpretDRxRead(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxGenReg, pDis->Param2.Base.idxDbgReg);
3439 }
3440 else
3441 if(pDis->Param1.fUse == DISUSE_REG_DBG && (pDis->Param2.fUse == DISUSE_REG_GEN32 || pDis->Param2.fUse == DISUSE_REG_GEN64))
3442 {
3443 rc = EMInterpretDRxWrite(pVM, pVCpu, pRegFrame, pDis->Param1.Base.idxDbgReg, pDis->Param2.Base.idxGenReg);
3444 }
3445 else
3446 AssertMsgFailed(("Unexpected debug register move\n"));
3447
3448 return rc;
3449}
3450
3451
3452/**
3453 * LLDT Emulation.
3454 */
3455static int emInterpretLLdt(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3456{
3457 DISQPVPARAMVAL param1;
3458 RTSEL sel;
3459 NOREF(pVM); NOREF(pvFault); NOREF(pcbSize);
3460
3461 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3462 if(RT_FAILURE(rc))
3463 return VERR_EM_INTERPRETER;
3464
3465 switch(param1.type)
3466 {
3467 case DISQPV_TYPE_ADDRESS:
3468 return VERR_EM_INTERPRETER; //feeling lazy right now
3469
3470 case DISQPV_TYPE_IMMEDIATE:
3471 if(!(param1.flags & DISQPV_FLAG_16))
3472 return VERR_EM_INTERPRETER;
3473 sel = (RTSEL)param1.val.val16;
3474 break;
3475
3476 default:
3477 return VERR_EM_INTERPRETER;
3478 }
3479
3480#ifdef IN_RING0
3481 /* Only for the VT-x real-mode emulation case. */
3482 AssertReturn(CPUMIsGuestInRealMode(pVCpu), VERR_EM_INTERPRETER);
3483 CPUMSetGuestLDTR(pVCpu, sel);
3484 return VINF_SUCCESS;
3485#else
3486 if (sel == 0)
3487 {
3488 if (CPUMGetHyperLDTR(pVCpu) == 0)
3489 {
3490 // this simple case is most frequent in Windows 2000 (31k - boot & shutdown)
3491 return VINF_SUCCESS;
3492 }
3493 }
3494 //still feeling lazy
3495 return VERR_EM_INTERPRETER;
3496#endif
3497}
3498
3499#ifdef IN_RING0
3500/**
3501 * LIDT/LGDT Emulation.
3502 */
3503static int emInterpretLIGdt(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3504{
3505 DISQPVPARAMVAL param1;
3506 RTGCPTR pParam1;
3507 X86XDTR32 dtr32;
3508 NOREF(pvFault); NOREF(pcbSize);
3509
3510 Log(("Emulate %s at %RGv\n", emGetMnemonic(pDis), (RTGCPTR)pRegFrame->rip));
3511
3512 /* Only for the VT-x real-mode emulation case. */
3513 AssertReturn(CPUMIsGuestInRealMode(pVCpu), VERR_EM_INTERPRETER);
3514
3515 int rc = DISQueryParamVal(pRegFrame, pDis, &pDis->Param1, &param1, DISQPVWHICH_SRC);
3516 if(RT_FAILURE(rc))
3517 return VERR_EM_INTERPRETER;
3518
3519 switch(param1.type)
3520 {
3521 case DISQPV_TYPE_ADDRESS:
3522 pParam1 = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, param1.val.val16);
3523 break;
3524
3525 default:
3526 return VERR_EM_INTERPRETER;
3527 }
3528
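/* Read the 6-byte pseudo-descriptor (16-bit limit followed by a 32-bit base) from guest memory. */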
3529 rc = emRamRead(pVM, pVCpu, pRegFrame, &dtr32, pParam1, sizeof(dtr32));
3530 AssertRCReturn(rc, VERR_EM_INTERPRETER);
3531
3532 if (!(pDis->fPrefix & DISPREFIX_OPSIZE))
3533 dtr32.uAddr &= 0xffffff; /* 16-bit operand size loads only a 24-bit base */
3534
3535 if (pDis->pCurInstr->uOpcode == OP_LIDT)
3536 CPUMSetGuestIDTR(pVCpu, dtr32.uAddr, dtr32.cb);
3537 else
3538 CPUMSetGuestGDTR(pVCpu, dtr32.uAddr, dtr32.cb);
3539
3540 return VINF_SUCCESS;
3541}
3542#endif
3543
3544
3545#ifdef IN_RC
3546/**
3547 * STI Emulation.
3548 *
3549 * @remark The instruction following STI is guaranteed to be executed before any interrupts are dispatched.
3550 */
3551static int emInterpretSti(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3552{
3553 NOREF(pcbSize);
3554 PPATMGCSTATE pGCState = PATMGetGCState(pVM);
3555
3556 if(!pGCState)
3557 {
3558 Assert(pGCState);
3559 return VERR_EM_INTERPRETER;
3560 }
3561 pGCState->uVMFlags |= X86_EFL_IF;
3562
3563 Assert(pRegFrame->eflags.u32 & X86_EFL_IF);
3564 Assert(pvFault == SELMToFlat(pVM, DISSELREG_CS, pRegFrame, (RTGCPTR)pRegFrame->rip));
3565
3566 pVCpu->em.s.GCPtrInhibitInterrupts = pRegFrame->eip + pDis->cbInstr;
3567 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3568
3569 return VINF_SUCCESS;
3570}
3571#endif /* IN_RC */
3572
3573
3574/**
3575 * HLT Emulation.
3576 */
3577static VBOXSTRICTRC
3578emInterpretHlt(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3579{
3580 NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
3581 return VINF_EM_HALT;
3582}
3583
3584
3585/**
3586 * RDTSC Emulation.
3587 */
3588static int emInterpretRdtsc(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3589{
3590 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3591 return EMInterpretRdtsc(pVM, pVCpu, pRegFrame);
3592}
3593
3594/**
3595 * RDPMC Emulation
3596 */
3597static int emInterpretRdpmc(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3598{
3599 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3600 return EMInterpretRdpmc(pVM, pVCpu, pRegFrame);
3601}
3602
3603
3604static int emInterpretMonitor(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3605{
3606 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3607 return EMInterpretMonitor(pVM, pVCpu, pRegFrame);
3608}
3609
3610
3611static VBOXSTRICTRC emInterpretMWait(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3612{
3613 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3614 return EMInterpretMWait(pVM, pVCpu, pRegFrame);
3615}
3616
3617
3618/**
3619 * RDMSR Emulation.
3620 */
3621static int emInterpretRdmsr(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3622{
3623 /* Note: The Intel manual claims there's a REX version of RDMSR that's slightly
3624 different, so we play it safe by completely disassembling the instruction. */
3625 Assert(!(pDis->fPrefix & DISPREFIX_REX));
3626 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3627 return EMInterpretRdmsr(pVM, pVCpu, pRegFrame);
3628}
3629
3630
3631/**
3632 * WRMSR Emulation.
3633 */
3634static int emInterpretWrmsr(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
3635{
3636 NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
3637 return EMInterpretWrmsr(pVM, pVCpu, pRegFrame);
3638}
3639
3640
3641/**
3642 * Internal worker.
3643 * @copydoc emInterpretInstructionCPUOuter
3644 * @param pVM The cross context VM structure.
3645 */
3646DECLINLINE(VBOXSTRICTRC) emInterpretInstructionCPU(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
3647 RTGCPTR pvFault, EMCODETYPE enmCodeType, uint32_t *pcbSize)
3648{
3649 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
3650 Assert(enmCodeType == EMCODETYPE_SUPERVISOR || enmCodeType == EMCODETYPE_ALL);
3651 Assert(pcbSize);
3652 *pcbSize = 0;
3653
3654 if (enmCodeType == EMCODETYPE_SUPERVISOR)
3655 {
3656 /*
3657 * Only supervisor guest code!!
3658 * And no complicated prefixes.
3659 */
3660 /* Get the current privilege level. */
3661 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3662#ifdef VBOX_WITH_RAW_RING1
3663 if ( !EMIsRawRing1Enabled(pVM)
3664 || cpl > 1
3665 || pRegFrame->eflags.Bits.u2IOPL > cpl
3666 )
3667#endif
3668 {
3669 if ( cpl != 0
3670 && pDis->pCurInstr->uOpcode != OP_RDTSC) /* rdtsc requires emulation in ring 3 as well */
3671 {
3672 Log(("WARNING: refusing instruction emulation for user-mode code!!\n"));
3673 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedUserMode));
3674 return VERR_EM_INTERPRETER;
3675 }
3676 }
3677 }
3678 else
3679 Log2(("emInterpretInstructionCPU allowed to interpret user-level code!!\n"));
3680
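/* Refuse REP/REPNE and LOCK prefixed instructions that the interpreter below cannot handle;
   the whitelist of tolerated opcodes differs between the raw-mode context and ring-0/ring-3. */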
3681#ifdef IN_RC
3682 if ( (pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP))
3683 || ( (pDis->fPrefix & DISPREFIX_LOCK)
3684 && pDis->pCurInstr->uOpcode != OP_CMPXCHG
3685 && pDis->pCurInstr->uOpcode != OP_CMPXCHG8B
3686 && pDis->pCurInstr->uOpcode != OP_XADD
3687 && pDis->pCurInstr->uOpcode != OP_OR
3688 && pDis->pCurInstr->uOpcode != OP_AND
3689 && pDis->pCurInstr->uOpcode != OP_XOR
3690 && pDis->pCurInstr->uOpcode != OP_BTR
3691 )
3692 )
3693#else
3694 if ( (pDis->fPrefix & DISPREFIX_REPNE)
3695 || ( (pDis->fPrefix & DISPREFIX_REP)
3696 && pDis->pCurInstr->uOpcode != OP_STOSWD
3697 )
3698 || ( (pDis->fPrefix & DISPREFIX_LOCK)
3699 && pDis->pCurInstr->uOpcode != OP_OR
3700 && pDis->pCurInstr->uOpcode != OP_AND
3701 && pDis->pCurInstr->uOpcode != OP_XOR
3702 && pDis->pCurInstr->uOpcode != OP_BTR
3703 && pDis->pCurInstr->uOpcode != OP_CMPXCHG
3704 && pDis->pCurInstr->uOpcode != OP_CMPXCHG8B
3705 )
3706 )
3707#endif
3708 {
3709 //Log(("EMInterpretInstruction: wrong prefix!!\n"));
3710 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedPrefix));
3711 Log4(("EM: Refuse %u on REP/REPNE/LOCK prefix grounds\n", pDis->pCurInstr->uOpcode));
3712 return VERR_EM_INTERPRETER;
3713 }
3714
3715#if HC_ARCH_BITS == 32
3716 /*
3717 * Unable to emulate most accesses wider than 4 bytes on a 32-bit host.
3718 * Whitelisted instructions are safe.
3719 */
3720 if ( pDis->Param1.cb > 4
3721 && CPUMIsGuestIn64BitCode(pVCpu))
3722 {
3723 uint32_t uOpCode = pDis->pCurInstr->uOpcode;
3724 if ( uOpCode != OP_STOSWD
3725 && uOpCode != OP_MOV
3726 && uOpCode != OP_CMPXCHG8B
3727 && uOpCode != OP_XCHG
3728 && uOpCode != OP_BTS
3729 && uOpCode != OP_BTR
3730 && uOpCode != OP_BTC
3731 )
3732 {
3733# ifdef VBOX_WITH_STATISTICS
3734 switch (pDis->pCurInstr->uOpcode)
3735 {
3736# define INTERPRET_FAILED_CASE(opcode, Instr) \
3737 case opcode: STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); break;
3738 INTERPRET_FAILED_CASE(OP_XCHG,Xchg);
3739 INTERPRET_FAILED_CASE(OP_DEC,Dec);
3740 INTERPRET_FAILED_CASE(OP_INC,Inc);
3741 INTERPRET_FAILED_CASE(OP_POP,Pop);
3742 INTERPRET_FAILED_CASE(OP_OR, Or);
3743 INTERPRET_FAILED_CASE(OP_XOR,Xor);
3744 INTERPRET_FAILED_CASE(OP_AND,And);
3745 INTERPRET_FAILED_CASE(OP_MOV,Mov);
3746 INTERPRET_FAILED_CASE(OP_STOSWD,StosWD);
3747 INTERPRET_FAILED_CASE(OP_INVLPG,InvlPg);
3748 INTERPRET_FAILED_CASE(OP_CPUID,CpuId);
3749 INTERPRET_FAILED_CASE(OP_MOV_CR,MovCRx);
3750 INTERPRET_FAILED_CASE(OP_MOV_DR,MovDRx);
3751 INTERPRET_FAILED_CASE(OP_LLDT,LLdt);
3752 INTERPRET_FAILED_CASE(OP_LIDT,LIdt);
3753 INTERPRET_FAILED_CASE(OP_LGDT,LGdt);
3754 INTERPRET_FAILED_CASE(OP_LMSW,Lmsw);
3755 INTERPRET_FAILED_CASE(OP_CLTS,Clts);
3756 INTERPRET_FAILED_CASE(OP_MONITOR,Monitor);
3757 INTERPRET_FAILED_CASE(OP_MWAIT,MWait);
3758 INTERPRET_FAILED_CASE(OP_RDMSR,Rdmsr);
3759 INTERPRET_FAILED_CASE(OP_WRMSR,Wrmsr);
3760 INTERPRET_FAILED_CASE(OP_ADD,Add);
3761 INTERPRET_FAILED_CASE(OP_SUB,Sub);
3762 INTERPRET_FAILED_CASE(OP_ADC,Adc);
3763 INTERPRET_FAILED_CASE(OP_BTR,Btr);
3764 INTERPRET_FAILED_CASE(OP_BTS,Bts);
3765 INTERPRET_FAILED_CASE(OP_BTC,Btc);
3766 INTERPRET_FAILED_CASE(OP_RDTSC,Rdtsc);
3767 INTERPRET_FAILED_CASE(OP_CMPXCHG, CmpXchg);
3768 INTERPRET_FAILED_CASE(OP_STI, Sti);
3769 INTERPRET_FAILED_CASE(OP_XADD,XAdd);
3770 INTERPRET_FAILED_CASE(OP_CMPXCHG8B,CmpXchg8b);
3771 INTERPRET_FAILED_CASE(OP_HLT, Hlt);
3772 INTERPRET_FAILED_CASE(OP_IRET,Iret);
3773 INTERPRET_FAILED_CASE(OP_WBINVD,WbInvd);
3774 INTERPRET_FAILED_CASE(OP_MOVNTPS,MovNTPS);
3775# undef INTERPRET_FAILED_CASE
3776 default:
3777 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedMisc));
3778 break;
3779 }
3780# endif /* VBOX_WITH_STATISTICS */
3781 Log4(("EM: Refuse %u on grounds of accessing %u bytes\n", pDis->pCurInstr->uOpcode, pDis->Param1.cb));
3782 return VERR_EM_INTERPRETER;
3783 }
3784 }
3785#endif
3786
3787 VBOXSTRICTRC rc;
3788#if (defined(VBOX_STRICT) || defined(LOG_ENABLED))
3789 LogFlow(("emInterpretInstructionCPU %s\n", emGetMnemonic(pDis)));
3790#endif
3791 switch (pDis->pCurInstr->uOpcode)
3792 {
3793 /*
3794 * Macros for generating the right case statements.
3795 */
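/* Each INTERPRET_CASE* macro expands to: invoke the matching emInterpret* handler, bump the
   per-instruction success or Failed##Instr statistics counter, and return the handler's status. */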
3796# ifndef VBOX_COMPARE_IEM_AND_EM
3797# define INTERPRET_CASE_EX_LOCK_PARAM3(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock) \
3798 case opcode:\
3799 if (pDis->fPrefix & DISPREFIX_LOCK) \
3800 rc = emInterpretLock##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulateLock); \
3801 else \
3802 rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulate); \
3803 if (RT_SUCCESS(rc)) \
3804 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
3805 else \
3806 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
3807 return rc
3808# else /* VBOX_COMPARE_IEM_AND_EM */
3809# define INTERPRET_CASE_EX_LOCK_PARAM3(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock) \
3810 case opcode:\
3811 rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulate); \
3812 if (RT_SUCCESS(rc)) \
3813 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
3814 else \
3815 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
3816 return rc
3817# endif /* VBOX_COMPARE_IEM_AND_EM */
3818
3819#define INTERPRET_CASE_EX_PARAM3(opcode, Instr, InstrFn, pfnEmulate) \
3820 case opcode:\
3821 rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize, pfnEmulate); \
3822 if (RT_SUCCESS(rc)) \
3823 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
3824 else \
3825 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
3826 return rc
3827
3828#define INTERPRET_CASE_EX_PARAM2(opcode, Instr, InstrFn, pfnEmulate) \
3829 INTERPRET_CASE_EX_PARAM3(opcode, Instr, InstrFn, pfnEmulate)
3830#define INTERPRET_CASE_EX_LOCK_PARAM2(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock) \
3831 INTERPRET_CASE_EX_LOCK_PARAM3(opcode, Instr, InstrFn, pfnEmulate, pfnEmulateLock)
3832
3833#define INTERPRET_CASE(opcode, Instr) \
3834 case opcode:\
3835 rc = emInterpret##Instr(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize); \
3836 if (RT_SUCCESS(rc)) \
3837 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
3838 else \
3839 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
3840 return rc
3841
3842#define INTERPRET_CASE_EX_DUAL_PARAM2(opcode, Instr, InstrFn) \
3843 case opcode:\
3844 rc = emInterpret##InstrFn(pVM, pVCpu, pDis, pRegFrame, pvFault, pcbSize); \
3845 if (RT_SUCCESS(rc)) \
3846 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Instr)); \
3847 else \
3848 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); \
3849 return rc
3850
3851#define INTERPRET_STAT_CASE(opcode, Instr) \
3852 case opcode: STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Failed##Instr)); return VERR_EM_INTERPRETER;
3853
3854 /*
3855 * The actual case statements.
3856 */
3857 INTERPRET_CASE(OP_XCHG,Xchg);
3858 INTERPRET_CASE_EX_PARAM2(OP_DEC,Dec, IncDec, EMEmulateDec);
3859 INTERPRET_CASE_EX_PARAM2(OP_INC,Inc, IncDec, EMEmulateInc);
3860 INTERPRET_CASE(OP_POP,Pop);
3861 INTERPRET_CASE_EX_LOCK_PARAM3(OP_OR, Or, OrXorAnd, EMEmulateOr, EMEmulateLockOr);
3862 INTERPRET_CASE_EX_LOCK_PARAM3(OP_XOR,Xor, OrXorAnd, EMEmulateXor, EMEmulateLockXor);
3863 INTERPRET_CASE_EX_LOCK_PARAM3(OP_AND,And, OrXorAnd, EMEmulateAnd, EMEmulateLockAnd);
3864 INTERPRET_CASE(OP_MOV,Mov);
3865#ifndef IN_RC
3866 INTERPRET_CASE(OP_STOSWD,StosWD);
3867#endif
3868 INTERPRET_CASE(OP_INVLPG,InvlPg);
3869 INTERPRET_CASE(OP_CPUID,CpuId);
3870 INTERPRET_CASE(OP_MOV_CR,MovCRx);
3871 INTERPRET_CASE(OP_MOV_DR,MovDRx);
3872#ifdef IN_RING0
3873 INTERPRET_CASE_EX_DUAL_PARAM2(OP_LIDT, LIdt, LIGdt);
3874 INTERPRET_CASE_EX_DUAL_PARAM2(OP_LGDT, LGdt, LIGdt);
3875#endif
3876 INTERPRET_CASE(OP_LLDT,LLdt);
3877 INTERPRET_CASE(OP_LMSW,Lmsw);
3878#ifdef EM_EMULATE_SMSW
3879 INTERPRET_CASE(OP_SMSW,Smsw);
3880#endif
3881 INTERPRET_CASE(OP_CLTS,Clts);
3882 INTERPRET_CASE(OP_MONITOR, Monitor);
3883 INTERPRET_CASE(OP_MWAIT, MWait);
3884 INTERPRET_CASE(OP_RDMSR, Rdmsr);
3885 INTERPRET_CASE(OP_WRMSR, Wrmsr);
3886 INTERPRET_CASE_EX_PARAM3(OP_ADD,Add, AddSub, EMEmulateAdd);
3887 INTERPRET_CASE_EX_PARAM3(OP_SUB,Sub, AddSub, EMEmulateSub);
3888 INTERPRET_CASE(OP_ADC,Adc);
3889 INTERPRET_CASE_EX_LOCK_PARAM2(OP_BTR,Btr, BitTest, EMEmulateBtr, EMEmulateLockBtr);
3890 INTERPRET_CASE_EX_PARAM2(OP_BTS,Bts, BitTest, EMEmulateBts);
3891 INTERPRET_CASE_EX_PARAM2(OP_BTC,Btc, BitTest, EMEmulateBtc);
3892 INTERPRET_CASE(OP_RDPMC,Rdpmc);
3893 INTERPRET_CASE(OP_RDTSC,Rdtsc);
3894 INTERPRET_CASE(OP_CMPXCHG, CmpXchg);
3895#ifdef IN_RC
3896 INTERPRET_CASE(OP_STI,Sti);
3897 INTERPRET_CASE(OP_XADD, XAdd);
3898 INTERPRET_CASE(OP_IRET,Iret);
3899#endif
3900 INTERPRET_CASE(OP_CMPXCHG8B, CmpXchg8b);
3901 INTERPRET_CASE(OP_HLT,Hlt);
3902 INTERPRET_CASE(OP_WBINVD,WbInvd);
3903#ifdef VBOX_WITH_STATISTICS
3904# ifndef IN_RC
3905 INTERPRET_STAT_CASE(OP_XADD, XAdd);
3906# endif
3907 INTERPRET_STAT_CASE(OP_MOVNTPS,MovNTPS);
3908#endif
3909
3910 default:
3911 Log3(("emInterpretInstructionCPU: opcode=%d\n", pDis->pCurInstr->uOpcode));
3912 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedMisc));
3913 return VERR_EM_INTERPRETER;
3914
3915#undef INTERPRET_CASE_EX_PARAM2
3916#undef INTERPRET_STAT_CASE
3917#undef INTERPRET_CASE_EX
3918#undef INTERPRET_CASE
3919 } /* switch (opcode) */
3920 /* not reached */
3921}
3922
3923/**
3924 * Interprets the current instruction using the supplied DISCPUSTATE structure.
3925 *
3926 * EIP is *NOT* updated!
3927 *
3928 * @returns VBox strict status code.
3929 * @retval VINF_* Scheduling instructions. When these are returned, it
3930 * starts to get a bit tricky to know whether code was
3931 * executed or not... We'll address this when it becomes a problem.
3932 * @retval VERR_EM_INTERPRETER Something we can't cope with.
3933 * @retval VERR_* Fatal errors.
3934 *
3935 * @param pVCpu The cross context virtual CPU structure.
3936 * @param pDis The disassembler cpu state for the instruction to be
3937 * interpreted.
3938 * @param pRegFrame The register frame. EIP is *NOT* changed!
3939 * @param pvFault The fault address (CR2).
3940 * @param pcbSize Size of the write (if applicable).
3941 * @param enmCodeType Code type (user/supervisor)
3942 *
3943 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
3944 * Architecture Software Developer's Manual, Vol 3, 5.5) so we don't need
3945 * to worry about e.g. invalid modrm combinations (!)
3946 *
3947 * @todo At this time we do NOT check if the instruction overwrites vital information.
3948 * Make sure this can't happen!! (will add some assertions/checks later)
3949 */
3950DECLINLINE(VBOXSTRICTRC) emInterpretInstructionCPUOuter(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
3951 RTGCPTR pvFault, EMCODETYPE enmCodeType, uint32_t *pcbSize)
3952{
3953 STAM_PROFILE_START(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Emulate), a);
3954 VBOXSTRICTRC rc = emInterpretInstructionCPU(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, pRegFrame, pvFault, enmCodeType, pcbSize);
3955 STAM_PROFILE_STOP(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Emulate), a);
3956 if (RT_SUCCESS(rc))
3957 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InterpretSucceeded));
3958 else
3959 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InterpretFailed));
3960 return rc;
3961}
3962
3963
3964#endif /* !VBOX_WITH_IEM */