VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 100694

Last change on this file since 100694 was 100694, checked in by vboxsync, 20 months ago

IEM/VMM: Deal with opcode checking cross page boundaries and tentatively for branches. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 472.2 KB
 
1/* $Id: IEMAll.cpp 100694 2023-07-25 10:34:22Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) : Memory writes.
82 * - Level 9 (Log9) : Memory reads.
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
87 * - Level 1 (Log) : Errors and other major events.
88 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
89 * - Level 2 (Log2) : VM exits.
90 *
91 * The syscall logging level assignments:
92 * - Level 1: DOS and BIOS.
93 * - Level 2: Windows 3.x
94 * - Level 3: Linux.
95 */
96
97/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
98#ifdef _MSC_VER
99# pragma warning(disable:4505)
100#endif
101
102
103/*********************************************************************************************************************************
104* Header Files *
105*********************************************************************************************************************************/
106#define LOG_GROUP LOG_GROUP_IEM
107#define VMCPU_INCL_CPUM_GST_CTX
108#include <VBox/vmm/iem.h>
109#include <VBox/vmm/cpum.h>
110#include <VBox/vmm/apic.h>
111#include <VBox/vmm/pdm.h>
112#include <VBox/vmm/pgm.h>
113#include <VBox/vmm/iom.h>
114#include <VBox/vmm/em.h>
115#include <VBox/vmm/hm.h>
116#include <VBox/vmm/nem.h>
117#include <VBox/vmm/gim.h>
118#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
119# include <VBox/vmm/em.h>
120# include <VBox/vmm/hm_svm.h>
121#endif
122#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
123# include <VBox/vmm/hmvmxinline.h>
124#endif
125#include <VBox/vmm/tm.h>
126#include <VBox/vmm/dbgf.h>
127#include <VBox/vmm/dbgftrace.h>
128#include "IEMInternal.h"
129#include <VBox/vmm/vmcc.h>
130#include <VBox/log.h>
131#include <VBox/err.h>
132#include <VBox/param.h>
133#include <VBox/dis.h>
134#include <iprt/asm-math.h>
135#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
136# include <iprt/asm-amd64-x86.h>
137#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
138# include <iprt/asm-arm.h>
139#endif
140#include <iprt/assert.h>
141#include <iprt/string.h>
142#include <iprt/x86.h>
143
144#include "IEMInline.h"
145
146
147/*********************************************************************************************************************************
148* Structures and Typedefs *
149*********************************************************************************************************************************/
150/**
151 * CPU exception classes.
152 */
153typedef enum IEMXCPTCLASS
154{
155 IEMXCPTCLASS_BENIGN,
156 IEMXCPTCLASS_CONTRIBUTORY,
157 IEMXCPTCLASS_PAGE_FAULT,
158 IEMXCPTCLASS_DOUBLE_FAULT
159} IEMXCPTCLASS;
160
161
162/*********************************************************************************************************************************
163* Global Variables *
164*********************************************************************************************************************************/
165#if defined(IEM_LOG_MEMORY_WRITES)
166/** What IEM just wrote. */
167uint8_t g_abIemWrote[256];
168/** How much IEM just wrote. */
169size_t g_cbIemWrote;
170#endif
171
172
173/*********************************************************************************************************************************
174* Internal Functions *
175*********************************************************************************************************************************/
176static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
177 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
178
179
180/**
181 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
182 * path.
183 *
184 * @returns IEM_F_BRK_PENDING_XXX or zero.
185 * @param pVCpu The cross context virtual CPU structure of the
186 * calling thread.
187 *
188 * @note Don't call directly, use iemCalcExecDbgFlags instead.
189 */
190uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
191{
192 uint32_t fExec = 0;
193
194 /*
195 * Process guest breakpoints.
196 */
197#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
198 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
199 { \
200 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
201 { \
202 case X86_DR7_RW_EO: \
203 fExec |= IEM_F_PENDING_BRK_INSTR; \
204 break; \
205 case X86_DR7_RW_WO: \
206 case X86_DR7_RW_RW: \
207 fExec |= IEM_F_PENDING_BRK_DATA; \
208 break; \
209 case X86_DR7_RW_IO: \
210 fExec |= IEM_F_PENDING_BRK_X86_IO; \
211 break; \
212 } \
213 } \
214 } while (0)
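/* Note: X86_DR7_L_G(a_iBp) covers both the local and the global enable bit of
   breakpoint a_iBp, so the breakpoint is treated as pending if either is set. */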
215
216 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
217 if (fGstDr7 & X86_DR7_ENABLED_MASK)
218 {
219 PROCESS_ONE_BP(fGstDr7, 0);
220 PROCESS_ONE_BP(fGstDr7, 1);
221 PROCESS_ONE_BP(fGstDr7, 2);
222 PROCESS_ONE_BP(fGstDr7, 3);
223 }
224
225 /*
226 * Process hypervisor breakpoints.
227 */
228 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
229 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
230 {
231 PROCESS_ONE_BP(fHyperDr7, 0);
232 PROCESS_ONE_BP(fHyperDr7, 1);
233 PROCESS_ONE_BP(fHyperDr7, 2);
234 PROCESS_ONE_BP(fHyperDr7, 3);
235 }
236
237 return fExec;
238}
239
240
241/**
242 * Initializes the decoder state.
243 *
244 * iemReInitDecoder is mostly a copy of this function.
245 *
246 * @param pVCpu The cross context virtual CPU structure of the
247 * calling thread.
248 * @param fExecOpts Optional execution flags:
249 * - IEM_F_BYPASS_HANDLERS
250 * - IEM_F_X86_DISREGARD_LOCK
251 */
252DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
253{
254 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
255 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
256 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
257 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
258 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
259 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
260 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
261 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
262 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
263 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
264
265 /* Execution state: */
266 uint32_t fExec;
267 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
268
269 /* Decoder state: */
270 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
271 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
272 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
273 {
274 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
275 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
276 }
277 else
278 {
279 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
280 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
281 }
282 pVCpu->iem.s.fPrefixes = 0;
283 pVCpu->iem.s.uRexReg = 0;
284 pVCpu->iem.s.uRexB = 0;
285 pVCpu->iem.s.uRexIndex = 0;
286 pVCpu->iem.s.idxPrefix = 0;
287 pVCpu->iem.s.uVex3rdReg = 0;
288 pVCpu->iem.s.uVexLength = 0;
289 pVCpu->iem.s.fEvexStuff = 0;
290 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
291#ifdef IEM_WITH_CODE_TLB
292 pVCpu->iem.s.pbInstrBuf = NULL;
293 pVCpu->iem.s.offInstrNextByte = 0;
294 pVCpu->iem.s.offCurInstrStart = 0;
295# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
296 pVCpu->iem.s.offOpcode = 0;
297# endif
298# ifdef VBOX_STRICT
299 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
300 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
301 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
302 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
303# endif
304#else
305 pVCpu->iem.s.offOpcode = 0;
306 pVCpu->iem.s.cbOpcode = 0;
307#endif
308 pVCpu->iem.s.offModRm = 0;
309 pVCpu->iem.s.cActiveMappings = 0;
310 pVCpu->iem.s.iNextMapping = 0;
311 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
312
313#ifdef DBGFTRACE_ENABLED
314 switch (IEM_GET_CPU_MODE(pVCpu))
315 {
316 case IEMMODE_64BIT:
317 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
318 break;
319 case IEMMODE_32BIT:
320 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
321 break;
322 case IEMMODE_16BIT:
323 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
324 break;
325 }
326#endif
327}
328
329
330/**
331 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
332 *
333 * This is mostly a copy of iemInitDecoder.
334 *
335 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
336 */
337DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
338{
339 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
340 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
341 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
342 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
343 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
344 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
345 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
346 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
347 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
348
349 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
350 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
351 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
352
353 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
354 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
355 pVCpu->iem.s.enmEffAddrMode = enmMode;
356 if (enmMode != IEMMODE_64BIT)
357 {
358 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
359 pVCpu->iem.s.enmEffOpSize = enmMode;
360 }
361 else
362 {
363 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
364 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
365 }
366 pVCpu->iem.s.fPrefixes = 0;
367 pVCpu->iem.s.uRexReg = 0;
368 pVCpu->iem.s.uRexB = 0;
369 pVCpu->iem.s.uRexIndex = 0;
370 pVCpu->iem.s.idxPrefix = 0;
371 pVCpu->iem.s.uVex3rdReg = 0;
372 pVCpu->iem.s.uVexLength = 0;
373 pVCpu->iem.s.fEvexStuff = 0;
374 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
375#ifdef IEM_WITH_CODE_TLB
376 if (pVCpu->iem.s.pbInstrBuf)
377 {
378 uint64_t off = (enmMode == IEMMODE_64BIT
379 ? pVCpu->cpum.GstCtx.rip
380 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
381 - pVCpu->iem.s.uInstrBufPc;
382 if (off < pVCpu->iem.s.cbInstrBufTotal)
383 {
384 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
385 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
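/* 15 is the maximum x86 instruction length; expose at most that many bytes
   beyond the instruction start, capped by what is actually buffered. */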
386 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
387 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
388 else
389 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
390 }
391 else
392 {
393 pVCpu->iem.s.pbInstrBuf = NULL;
394 pVCpu->iem.s.offInstrNextByte = 0;
395 pVCpu->iem.s.offCurInstrStart = 0;
396 pVCpu->iem.s.cbInstrBuf = 0;
397 pVCpu->iem.s.cbInstrBufTotal = 0;
398 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
399 }
400 }
401 else
402 {
403 pVCpu->iem.s.offInstrNextByte = 0;
404 pVCpu->iem.s.offCurInstrStart = 0;
405 pVCpu->iem.s.cbInstrBuf = 0;
406 pVCpu->iem.s.cbInstrBufTotal = 0;
407# ifdef VBOX_STRICT
408 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
409# endif
410 }
411# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
412 pVCpu->iem.s.offOpcode = 0;
413# endif
414#else /* !IEM_WITH_CODE_TLB */
415 pVCpu->iem.s.cbOpcode = 0;
416 pVCpu->iem.s.offOpcode = 0;
417#endif /* !IEM_WITH_CODE_TLB */
418 pVCpu->iem.s.offModRm = 0;
419 Assert(pVCpu->iem.s.cActiveMappings == 0);
420 pVCpu->iem.s.iNextMapping = 0;
421 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
422 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
423
424#ifdef DBGFTRACE_ENABLED
425 switch (enmMode)
426 {
427 case IEMMODE_64BIT:
428 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
429 break;
430 case IEMMODE_32BIT:
431 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
432 break;
433 case IEMMODE_16BIT:
434 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
435 break;
436 }
437#endif
438}
439
440
441
442/**
443 * Prefetch opcodes the first time when starting executing.
444 *
445 * @returns Strict VBox status code.
446 * @param pVCpu The cross context virtual CPU structure of the
447 * calling thread.
448 * @param fExecOpts Optional execution flags:
449 * - IEM_F_BYPASS_HANDLERS
450 * - IEM_F_X86_DISREGARD_LOCK
451 */
452static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
453{
454 iemInitDecoder(pVCpu, fExecOpts);
455
456#ifndef IEM_WITH_CODE_TLB
457 /*
458 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
459 *
460 * First translate CS:rIP to a physical address.
461 *
462 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
463 * all relevant bytes from the first page, as it ASSUMES it's only ever
464 * called for dealing with CS.LIM, page crossing and instructions that
465 * are too long.
466 */
467 uint32_t cbToTryRead;
468 RTGCPTR GCPtrPC;
469 if (IEM_IS_64BIT_CODE(pVCpu))
470 {
471 cbToTryRead = GUEST_PAGE_SIZE;
472 GCPtrPC = pVCpu->cpum.GstCtx.rip;
473 if (IEM_IS_CANONICAL(GCPtrPC))
474 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
475 else
476 return iemRaiseGeneralProtectionFault0(pVCpu);
477 }
478 else
479 {
480 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
481 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
482 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
483 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
484 else
485 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
486 if (cbToTryRead) { /* likely */ }
487 else /* overflowed */
488 {
489 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
490 cbToTryRead = UINT32_MAX;
491 }
492 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
493 Assert(GCPtrPC <= UINT32_MAX);
494 }
495
496 PGMPTWALK Walk;
497 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
498 if (RT_SUCCESS(rc))
499 Assert(Walk.fSucceeded); /* probable. */
500 else
501 {
502 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
503# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
504 if (Walk.fFailed & PGM_WALKFAIL_EPT)
505 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
506# endif
507 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
508 }
509 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
510 else
511 {
512 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
513# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
514 if (Walk.fFailed & PGM_WALKFAIL_EPT)
515 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
516# endif
517 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
518 }
519 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
520 else
521 {
522 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
523# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
524 if (Walk.fFailed & PGM_WALKFAIL_EPT)
525 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
526# endif
527 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
528 }
529 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
530 /** @todo Check reserved bits and such stuff. PGM is better at doing
531 * that, so do it when implementing the guest virtual address
532 * TLB... */
533
534 /*
535 * Read the bytes at this address.
536 */
537 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
538 if (cbToTryRead > cbLeftOnPage)
539 cbToTryRead = cbLeftOnPage;
540 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
541 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
542
543 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
544 {
545 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
546 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
547 { /* likely */ }
548 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
549 {
550 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
551 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
552 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
553 }
554 else
555 {
556 Log((RT_SUCCESS(rcStrict)
557 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
558 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
559 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
560 return rcStrict;
561 }
562 }
563 else
564 {
565 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
566 if (RT_SUCCESS(rc))
567 { /* likely */ }
568 else
569 {
570 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
571 GCPtrPC, GCPhys, rc, cbToTryRead));
572 return rc;
573 }
574 }
575 pVCpu->iem.s.cbOpcode = cbToTryRead;
576#endif /* !IEM_WITH_CODE_TLB */
577 return VINF_SUCCESS;
578}
579
580
581/**
582 * Invalidates the IEM TLBs.
583 *
584 * This is called internally as well as by PGM when moving GC mappings.
585 *
586 * @param pVCpu The cross context virtual CPU structure of the calling
587 * thread.
588 */
589VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
590{
591#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
592 Log10(("IEMTlbInvalidateAll\n"));
593# ifdef IEM_WITH_CODE_TLB
594 pVCpu->iem.s.cbInstrBufTotal = 0;
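/* Bumping the revision implicitly invalidates every entry, since a valid
   entry's uTag embeds the revision it was created under; only on the rare
   rollover to zero are the tags cleared explicitly. */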
595 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
596 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
597 { /* very likely */ }
598 else
599 {
600 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
601 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
602 while (i-- > 0)
603 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
604 }
605# endif
606
607# ifdef IEM_WITH_DATA_TLB
608 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
609 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
610 { /* very likely */ }
611 else
612 {
613 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
614 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
615 while (i-- > 0)
616 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
617 }
618# endif
619#else
620 RT_NOREF(pVCpu);
621#endif
622}
623
624
625/**
626 * Invalidates a page in the TLBs.
627 *
628 * @param pVCpu The cross context virtual CPU structure of the calling
629 * thread.
630 * @param GCPtr The address of the page to invalidate
631 * @thread EMT(pVCpu)
632 */
633VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
634{
635#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
636 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
637 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
638 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
639 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
640
641# ifdef IEM_WITH_CODE_TLB
642 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
643 {
644 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
645 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
646 pVCpu->iem.s.cbInstrBufTotal = 0;
647 }
648# endif
649
650# ifdef IEM_WITH_DATA_TLB
651 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
652 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
653# endif
654#else
655 NOREF(pVCpu); NOREF(GCPtr);
656#endif
657}
658
659
660#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
661/**
662 * Invalidates both TLBs in slow fashion following a rollover.
663 *
664 * Worker for IEMTlbInvalidateAllPhysical,
665 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
666 * iemMemMapJmp and others.
667 *
668 * @thread EMT(pVCpu)
669 */
670static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
671{
672 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
673 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
674 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
675
676 unsigned i;
677# ifdef IEM_WITH_CODE_TLB
678 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
679 while (i-- > 0)
680 {
681 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
682 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
683 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
684 }
685# endif
686# ifdef IEM_WITH_DATA_TLB
687 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
688 while (i-- > 0)
689 {
690 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
691 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
692 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
693 }
694# endif
695
696}
697#endif
698
699
700/**
701 * Invalidates the host physical aspects of the IEM TLBs.
702 *
703 * This is called internally as well as by PGM when moving GC mappings.
704 *
705 * @param pVCpu The cross context virtual CPU structure of the calling
706 * thread.
707 * @note Currently not used.
708 */
709VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
710{
711#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
712 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
713 Log10(("IEMTlbInvalidateAllPhysical\n"));
714
715# ifdef IEM_WITH_CODE_TLB
716 pVCpu->iem.s.cbInstrBufTotal = 0;
717# endif
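/* The physical revision works like the TLB revision in IEMTlbInvalidateAll:
   entries whose fFlagsAndPhysRev no longer matches uTlbPhysRev must re-query
   PGM for the physical page info before being used again; a rollover takes
   the slow path that scrubs every entry. */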
718 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
719 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
720 {
721 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
722 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
723 }
724 else
725 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
726#else
727 NOREF(pVCpu);
728#endif
729}
730
731
732/**
733 * Invalidates the host physical aspects of the IEM TLBs.
734 *
735 * This is called internally as well as by PGM when moving GC mappings.
736 *
737 * @param pVM The cross context VM structure.
738 * @param idCpuCaller The ID of the calling EMT if available to the caller,
739 * otherwise NIL_VMCPUID.
740 *
741 * @remarks Caller holds the PGM lock.
742 */
743VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
744{
745#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
746 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
747 if (pVCpuCaller)
748 VMCPU_ASSERT_EMT(pVCpuCaller);
749 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
750
751 VMCC_FOR_EACH_VMCPU(pVM)
752 {
753# ifdef IEM_WITH_CODE_TLB
754 if (pVCpuCaller == pVCpu)
755 pVCpu->iem.s.cbInstrBufTotal = 0;
756# endif
757
758 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
759 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
760 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
761 { /* likely */}
762 else if (pVCpuCaller == pVCpu)
763 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
764 else
765 {
766 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
767 continue;
768 }
769 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
770 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
771 }
772 VMCC_FOR_EACH_VMCPU_END(pVM);
773
774#else
775 RT_NOREF(pVM, idCpuCaller);
776#endif
777}
778
779
780/**
781 * Flushes the prefetch buffer, light version.
782 */
783void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
784{
785#ifndef IEM_WITH_CODE_TLB
786 pVCpu->iem.s.cbOpcode = cbInstr;
787#else
788 RT_NOREF(pVCpu, cbInstr);
789#endif
790}
791
792
793/**
794 * Flushes the prefetch buffer, heavy version.
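 *
 * Unlike the light version, this also drops the code TLB instruction buffer
 * pointer (pbInstrBuf) so that the next opcode fetch re-establishes it.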
795 */
796void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
797{
798#ifndef IEM_WITH_CODE_TLB
799 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
800#elif 1
801 pVCpu->iem.s.pbInstrBuf = NULL;
802 RT_NOREF(cbInstr);
803#else
804 RT_NOREF(pVCpu, cbInstr);
805#endif
806}
807
808
809
810#ifdef IEM_WITH_CODE_TLB
811
812/**
813 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
814 * failure and jumps.
815 *
816 * We end up here for a number of reasons:
817 * - pbInstrBuf isn't yet initialized.
818 * - Advancing beyond the buffer boundary (e.g. cross page).
819 * - Advancing beyond the CS segment limit.
820 * - Fetching from non-mappable page (e.g. MMIO).
821 *
822 * @param pVCpu The cross context virtual CPU structure of the
823 * calling thread.
824 * @param pvDst Where to return the bytes.
825 * @param cbDst Number of bytes to read. A value of zero is
826 * allowed for initializing pbInstrBuf (the
827 * recompiler does this). In this case it is best
828 * to set pbInstrBuf to NULL prior to the call.
829 */
830void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
831{
832# ifdef IN_RING3
833 for (;;)
834 {
835 Assert(cbDst <= 8);
836 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
837
838 /*
839 * We might have a partial buffer match, deal with that first to make the
840 * rest simpler. This is the first part of the cross page/buffer case.
841 */
842 if (pVCpu->iem.s.pbInstrBuf != NULL)
843 {
844 if (offBuf < pVCpu->iem.s.cbInstrBuf)
845 {
846 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
847 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
848 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
849
850 cbDst -= cbCopy;
851 pvDst = (uint8_t *)pvDst + cbCopy;
852 offBuf += cbCopy;
853 pVCpu->iem.s.offInstrNextByte += cbCopy;
854 }
855 }
856
857 /*
858 * Check segment limit, figuring how much we're allowed to access at this point.
859 *
860 * We will fault immediately if RIP is past the segment limit / in non-canonical
861 * territory. If we do continue, there are one or more bytes to read before we
862 * end up in trouble and we need to do that first before faulting.
863 */
864 RTGCPTR GCPtrFirst;
865 uint32_t cbMaxRead;
866 if (IEM_IS_64BIT_CODE(pVCpu))
867 {
868 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
869 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
870 { /* likely */ }
871 else
872 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
873 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
874 }
875 else
876 {
877 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
878 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
879 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
880 { /* likely */ }
881 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
882 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
883 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
884 if (cbMaxRead != 0)
885 { /* likely */ }
886 else
887 {
888 /* Overflowed because address is 0 and limit is max. */
889 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
890 cbMaxRead = X86_PAGE_SIZE;
891 }
892 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
893 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
894 if (cbMaxRead2 < cbMaxRead)
895 cbMaxRead = cbMaxRead2;
896 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
897 }
898
899 /*
900 * Get the TLB entry for this piece of code.
901 */
902 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
903 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
904 if (pTlbe->uTag == uTag)
905 {
906 /* likely when executing lots of code, otherwise unlikely */
907# ifdef VBOX_WITH_STATISTICS
908 pVCpu->iem.s.CodeTlb.cTlbHits++;
909# endif
910 }
911 else
912 {
913 pVCpu->iem.s.CodeTlb.cTlbMisses++;
914 PGMPTWALK Walk;
915 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
916 if (RT_FAILURE(rc))
917 {
918#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
919 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
920 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
921#endif
922 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
923 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
924 }
925
926 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
927 Assert(Walk.fSucceeded);
928 pTlbe->uTag = uTag;
929 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
930 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
931 pTlbe->GCPhys = Walk.GCPhys;
932 pTlbe->pbMappingR3 = NULL;
933 }
934
935 /*
936 * Check TLB page table level access flags.
937 */
938 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
939 {
940 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
941 {
942 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
943 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
944 }
945 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
946 {
947 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
948 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
949 }
950 }
951
952 /*
953 * Look up the physical page info if necessary.
954 */
955 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
956 { /* not necessary */ }
957 else
958 {
959 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
960 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
961 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
962 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
963 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
964 { /* likely */ }
965 else
966 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
967 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
968 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
969 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
970 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
971 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
972 }
973
974# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
975 /*
976 * Try do a direct read using the pbMappingR3 pointer.
977 */
978 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
979 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
980 {
981 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
982 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
983 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
984 {
985 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
986 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
987 }
988 else
989 {
990 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
991 if (cbInstr + (uint32_t)cbDst <= 15)
992 {
993 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
994 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
995 }
996 else
997 {
998 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
999 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1000 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1001 }
1002 }
1003 if (cbDst <= cbMaxRead)
1004 {
1005 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1006 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1007 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1008 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1009 pVCpu->iem.s.fTbCrossedPage |= offPg == 0;
1010 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1011 return;
1012 }
1013 pVCpu->iem.s.pbInstrBuf = NULL;
1014
1015 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1016 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1017 }
1018# else
1019# error "refactor as needed"
1020 /*
1021 * If there is no special read handling, we can read a bit more and
1022 * put it in the prefetch buffer.
1023 */
1024 if ( cbDst < cbMaxRead
1025 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1026 {
1027 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1028 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1029 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1030 { /* likely */ }
1031 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1032 {
1033 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1034 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1035 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1036 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1037 }
1038 else
1039 {
1040 Log((RT_SUCCESS(rcStrict)
1041 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1042 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1043 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1044 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1045 }
1046 }
1047# endif
1048 /*
1049 * Special read handling, so only read exactly what's needed.
1050 * This is a highly unlikely scenario.
1051 */
1052 else
1053 {
1054 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1055
1056 /* Check instruction length. */
1057 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1058 if (RT_LIKELY(cbInstr + cbDst <= 15))
1059 { /* likely */ }
1060 else
1061 {
1062 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1063 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1064 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1065 }
1066
1067 /* Do the reading. */
1068 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1069 if (cbToRead > 0)
1070 {
1071 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1072 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1073 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1074 { /* likely */ }
1075 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1076 {
1077 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1078 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1079 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1080 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1081 }
1082 else
1083 {
1084 Log((RT_SUCCESS(rcStrict)
1085 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1086 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1087 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1088 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1089 }
1090 }
1091
1092 /* Update the state and probably return. */
1093 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1094 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1095 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1096 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1097 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE;
1098 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1099 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1100 pVCpu->iem.s.pbInstrBuf = NULL;
1101 pVCpu->iem.s.fTbCrossedPage |= offPg == 0;
1102 if (cbToRead == cbDst)
1103 return;
1104 }
1105
1106 /*
1107 * More to read, loop.
1108 */
1109 cbDst -= cbMaxRead;
1110 pvDst = (uint8_t *)pvDst + cbMaxRead;
1111 }
1112# else /* !IN_RING3 */
1113 RT_NOREF(pvDst, cbDst);
1114 if (pvDst || cbDst)
1115 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1116# endif /* !IN_RING3 */
1117}
1118
1119#else /* !IEM_WITH_CODE_TLB */
1120
1121/**
1122 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate
1123 * exception if it fails.
1124 *
1125 * @returns Strict VBox status code.
1126 * @param pVCpu The cross context virtual CPU structure of the
1127 * calling thread.
1128 * @param cbMin The minimum number of bytes relative to offOpcode
1129 * that must be read.
1130 */
1131VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1132{
1133 /*
1134 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1135 *
1136 * First translate CS:rIP to a physical address.
1137 */
1138 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1139 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1140 uint8_t const cbLeft = cbOpcode - offOpcode;
1141 Assert(cbLeft < cbMin);
1142 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1143
1144 uint32_t cbToTryRead;
1145 RTGCPTR GCPtrNext;
1146 if (IEM_IS_64BIT_CODE(pVCpu))
1147 {
1148 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1149 if (!IEM_IS_CANONICAL(GCPtrNext))
1150 return iemRaiseGeneralProtectionFault0(pVCpu);
1151 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1152 }
1153 else
1154 {
1155 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1156 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1157 GCPtrNext32 += cbOpcode;
1158 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1159 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1160 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1161 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1162 if (!cbToTryRead) /* overflowed */
1163 {
1164 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1165 cbToTryRead = UINT32_MAX;
1166 /** @todo check out wrapping around the code segment. */
1167 }
1168 if (cbToTryRead < cbMin - cbLeft)
1169 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1170 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1171
1172 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1173 if (cbToTryRead > cbLeftOnPage)
1174 cbToTryRead = cbLeftOnPage;
1175 }
1176
1177 /* Restrict to opcode buffer space.
1178
1179 We're making ASSUMPTIONS here based on work done previously in
1180 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1181 be fetched in case of an instruction crossing two pages. */
1182 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1183 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1184 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1185 { /* likely */ }
1186 else
1187 {
1188 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1189 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1190 return iemRaiseGeneralProtectionFault0(pVCpu);
1191 }
1192
1193 PGMPTWALK Walk;
1194 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1195 if (RT_FAILURE(rc))
1196 {
1197 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1198#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1199 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1200 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1201#endif
1202 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1203 }
1204 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1205 {
1206 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1207#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1208 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1209 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1210#endif
1211 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1212 }
1213 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1214 {
1215 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1216#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1217 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1218 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1219#endif
1220 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1221 }
1222 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1223 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1224 /** @todo Check reserved bits and such stuff. PGM is better at doing
1225 * that, so do it when implementing the guest virtual address
1226 * TLB... */
1227
1228 /*
1229 * Read the bytes at this address.
1230 *
1231 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1232 * and since PATM should only patch the start of an instruction there
1233 * should be no need to check again here.
1234 */
1235 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1236 {
1237 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1238 cbToTryRead, PGMACCESSORIGIN_IEM);
1239 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1240 { /* likely */ }
1241 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1242 {
1243 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1244 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1245 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1246 }
1247 else
1248 {
1249 Log((RT_SUCCESS(rcStrict)
1250 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1251 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1252 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1253 return rcStrict;
1254 }
1255 }
1256 else
1257 {
1258 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1259 if (RT_SUCCESS(rc))
1260 { /* likely */ }
1261 else
1262 {
1263 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1264 return rc;
1265 }
1266 }
1267 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1268 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1269
1270 return VINF_SUCCESS;
1271}
1272
1273#endif /* !IEM_WITH_CODE_TLB */
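/*
 * The opcode getter helpers below are the out-of-line slow paths. The common
 * fast path serves bytes straight from the prefetched opcode buffer and only
 * ends up here when more bytes must be fetched or, with the code TLB, when
 * the instruction buffer has to be (re)loaded.
 */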
1274#ifndef IEM_WITH_SETJMP
1275
1276/**
1277 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1278 *
1279 * @returns Strict VBox status code.
1280 * @param pVCpu The cross context virtual CPU structure of the
1281 * calling thread.
1282 * @param pb Where to return the opcode byte.
1283 */
1284VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1285{
1286 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1287 if (rcStrict == VINF_SUCCESS)
1288 {
1289 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1290 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1291 pVCpu->iem.s.offOpcode = offOpcode + 1;
1292 }
1293 else
1294 *pb = 0;
1295 return rcStrict;
1296}
1297
1298#else /* IEM_WITH_SETJMP */
1299
1300/**
1301 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1302 *
1303 * @returns The opcode byte.
1304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1305 */
1306uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1307{
1308# ifdef IEM_WITH_CODE_TLB
1309 uint8_t u8;
1310 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1311 return u8;
1312# else
1313 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1314 if (rcStrict == VINF_SUCCESS)
1315 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1316 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1317# endif
1318}
1319
1320#endif /* IEM_WITH_SETJMP */
1321
1322#ifndef IEM_WITH_SETJMP
1323
1324/**
1325 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1326 *
1327 * @returns Strict VBox status code.
1328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1329 * @param pu16 Where to return the opcode word.
1330 */
1331VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1332{
1333 uint8_t u8;
1334 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1335 if (rcStrict == VINF_SUCCESS)
1336 *pu16 = (int8_t)u8;
1337 return rcStrict;
1338}
1339
1340
1341/**
1342 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1343 *
1344 * @returns Strict VBox status code.
1345 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1346 * @param pu32 Where to return the opcode dword.
1347 */
1348VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1349{
1350 uint8_t u8;
1351 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1352 if (rcStrict == VINF_SUCCESS)
1353 *pu32 = (int8_t)u8;
1354 return rcStrict;
1355}
1356
1357
1358/**
1359 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1360 *
1361 * @returns Strict VBox status code.
1362 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1363 * @param pu64 Where to return the opcode qword.
1364 */
1365VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1366{
1367 uint8_t u8;
1368 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1369 if (rcStrict == VINF_SUCCESS)
1370 *pu64 = (int8_t)u8;
1371 return rcStrict;
1372}
1373
1374#endif /* !IEM_WITH_SETJMP */
1375
1376
1377#ifndef IEM_WITH_SETJMP
1378
1379/**
1380 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1381 *
1382 * @returns Strict VBox status code.
1383 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1384 * @param pu16 Where to return the opcode word.
1385 */
1386VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1387{
1388 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1389 if (rcStrict == VINF_SUCCESS)
1390 {
1391 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1392# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1393 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1394# else
1395 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1396# endif
1397 pVCpu->iem.s.offOpcode = offOpcode + 2;
1398 }
1399 else
1400 *pu16 = 0;
1401 return rcStrict;
1402}
1403
1404#else /* IEM_WITH_SETJMP */
1405
1406/**
1407 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1408 *
1409 * @returns The opcode word.
1410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1411 */
1412uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1413{
1414# ifdef IEM_WITH_CODE_TLB
1415 uint16_t u16;
1416 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1417 return u16;
1418# else
1419 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1420 if (rcStrict == VINF_SUCCESS)
1421 {
1422 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1423 pVCpu->iem.s.offOpcode += 2;
1424# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1425 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1426# else
1427 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1428# endif
1429 }
1430 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1431# endif
1432}
1433
1434#endif /* IEM_WITH_SETJMP */
1435
1436#ifndef IEM_WITH_SETJMP
1437
1438/**
1439 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1440 *
1441 * @returns Strict VBox status code.
1442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1443 * @param pu32 Where to return the opcode double word.
1444 */
1445VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1446{
1447 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1448 if (rcStrict == VINF_SUCCESS)
1449 {
1450 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1451 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1452 pVCpu->iem.s.offOpcode = offOpcode + 2;
1453 }
1454 else
1455 *pu32 = 0;
1456 return rcStrict;
1457}
1458
1459
1460/**
1461 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1462 *
1463 * @returns Strict VBox status code.
1464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1465 * @param pu64 Where to return the opcode quad word.
1466 */
1467VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1468{
1469 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1470 if (rcStrict == VINF_SUCCESS)
1471 {
1472 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1473 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1474 pVCpu->iem.s.offOpcode = offOpcode + 2;
1475 }
1476 else
1477 *pu64 = 0;
1478 return rcStrict;
1479}
1480
1481#endif /* !IEM_WITH_SETJMP */
1482
1483#ifndef IEM_WITH_SETJMP
1484
1485/**
1486 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1487 *
1488 * @returns Strict VBox status code.
1489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1490 * @param pu32 Where to return the opcode dword.
1491 */
1492VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1493{
1494 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1495 if (rcStrict == VINF_SUCCESS)
1496 {
1497 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1498# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1499 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1500# else
1501 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1502 pVCpu->iem.s.abOpcode[offOpcode + 1],
1503 pVCpu->iem.s.abOpcode[offOpcode + 2],
1504 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1505# endif
1506 pVCpu->iem.s.offOpcode = offOpcode + 4;
1507 }
1508 else
1509 *pu32 = 0;
1510 return rcStrict;
1511}
1512
1513#else /* IEM_WITH_SETJMP */
1514
1515/**
1516 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1517 *
1518 * @returns The opcode dword.
1519 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1520 */
1521uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1522{
1523# ifdef IEM_WITH_CODE_TLB
1524 uint32_t u32;
1525 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1526 return u32;
1527# else
1528 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1529 if (rcStrict == VINF_SUCCESS)
1530 {
1531 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1532 pVCpu->iem.s.offOpcode = offOpcode + 4;
1533# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1534 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1535# else
1536 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1537 pVCpu->iem.s.abOpcode[offOpcode + 1],
1538 pVCpu->iem.s.abOpcode[offOpcode + 2],
1539 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1540# endif
1541 }
1542 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1543# endif
1544}
1545
1546#endif /* IEM_WITH_SETJMP */
1547
1548#ifndef IEM_WITH_SETJMP
1549
1550/**
1551 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1552 *
1553 * @returns Strict VBox status code.
1554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1555 * @param pu64 Where to return the opcode qword.
1556 */
1557VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1558{
1559 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1560 if (rcStrict == VINF_SUCCESS)
1561 {
1562 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1563 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1564 pVCpu->iem.s.abOpcode[offOpcode + 1],
1565 pVCpu->iem.s.abOpcode[offOpcode + 2],
1566 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1567 pVCpu->iem.s.offOpcode = offOpcode + 4;
1568 }
1569 else
1570 *pu64 = 0;
1571 return rcStrict;
1572}
1573
1574
1575/**
1576 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1577 *
1578 * @returns Strict VBox status code.
1579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1580 * @param pu64 Where to return the opcode dword, sign extended to a quad word.
1581 */
1582VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1583{
1584 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1585 if (rcStrict == VINF_SUCCESS)
1586 {
1587 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1588 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1589 pVCpu->iem.s.abOpcode[offOpcode + 1],
1590 pVCpu->iem.s.abOpcode[offOpcode + 2],
1591 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1592 pVCpu->iem.s.offOpcode = offOpcode + 4;
1593 }
1594 else
1595 *pu64 = 0;
1596 return rcStrict;
1597}
1598
1599#endif /* !IEM_WITH_SETJMP */
1600
1601#ifndef IEM_WITH_SETJMP
1602
1603/**
1604 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1605 *
1606 * @returns Strict VBox status code.
1607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1608 * @param pu64 Where to return the opcode qword.
1609 */
1610VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1611{
1612 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1613 if (rcStrict == VINF_SUCCESS)
1614 {
1615 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1616# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1617 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1618# else
1619 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1620 pVCpu->iem.s.abOpcode[offOpcode + 1],
1621 pVCpu->iem.s.abOpcode[offOpcode + 2],
1622 pVCpu->iem.s.abOpcode[offOpcode + 3],
1623 pVCpu->iem.s.abOpcode[offOpcode + 4],
1624 pVCpu->iem.s.abOpcode[offOpcode + 5],
1625 pVCpu->iem.s.abOpcode[offOpcode + 6],
1626 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1627# endif
1628 pVCpu->iem.s.offOpcode = offOpcode + 8;
1629 }
1630 else
1631 *pu64 = 0;
1632 return rcStrict;
1633}
1634
1635#else /* IEM_WITH_SETJMP */
1636
1637/**
1638 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1639 *
1640 * @returns The opcode qword.
1641 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1642 */
1643uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1644{
1645# ifdef IEM_WITH_CODE_TLB
1646 uint64_t u64;
1647 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1648 return u64;
1649# else
1650 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1651 if (rcStrict == VINF_SUCCESS)
1652 {
1653 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1654 pVCpu->iem.s.offOpcode = offOpcode + 8;
1655# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1656 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1657# else
1658 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1659 pVCpu->iem.s.abOpcode[offOpcode + 1],
1660 pVCpu->iem.s.abOpcode[offOpcode + 2],
1661 pVCpu->iem.s.abOpcode[offOpcode + 3],
1662 pVCpu->iem.s.abOpcode[offOpcode + 4],
1663 pVCpu->iem.s.abOpcode[offOpcode + 5],
1664 pVCpu->iem.s.abOpcode[offOpcode + 6],
1665 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1666# endif
1667 }
1668 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1669# endif
1670}
1671
1672#endif /* IEM_WITH_SETJMP */
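
/* A minimal standalone sketch of the byte-wise little-endian assembly the slow fetch
   paths above fall back to when IEM_USE_UNALIGNED_DATA_ACCESS isn't defined. It only
   assumes <stdint.h>/<stddef.h>; the Example* names are made up for illustration and
   are not IEM APIs. */
#if 0 /* illustrative sketch, not compiled */
# include <stddef.h>
# include <stdint.h>

/* Assemble a little-endian dword from the opcode byte buffer, one byte at a time,
   the same way RT_MAKE_U32_FROM_U8 is used above. */
static uint32_t ExampleReadOpcodeU32(uint8_t const *pabOpcode, size_t offOpcode)
{
    return (uint32_t)pabOpcode[offOpcode]
         | ((uint32_t)pabOpcode[offOpcode + 1] <<  8)
         | ((uint32_t)pabOpcode[offOpcode + 2] << 16)
         | ((uint32_t)pabOpcode[offOpcode + 3] << 24);
}

/* Same idea for a qword (RT_MAKE_U64_FROM_U8), built from two dword halves. */
static uint64_t ExampleReadOpcodeU64(uint8_t const *pabOpcode, size_t offOpcode)
{
    return (uint64_t)ExampleReadOpcodeU32(pabOpcode, offOpcode)
         | ((uint64_t)ExampleReadOpcodeU32(pabOpcode, offOpcode + 4) << 32);
}
#endif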
1673
1674
1675
1676/** @name Misc Worker Functions.
1677 * @{
1678 */
1679
1680/**
1681 * Gets the exception class for the specified exception vector.
1682 *
1683 * @returns The class of the specified exception.
1684 * @param uVector The exception vector.
1685 */
1686static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1687{
1688 Assert(uVector <= X86_XCPT_LAST);
1689 switch (uVector)
1690 {
1691 case X86_XCPT_DE:
1692 case X86_XCPT_TS:
1693 case X86_XCPT_NP:
1694 case X86_XCPT_SS:
1695 case X86_XCPT_GP:
1696 case X86_XCPT_SX: /* AMD only */
1697 return IEMXCPTCLASS_CONTRIBUTORY;
1698
1699 case X86_XCPT_PF:
1700 case X86_XCPT_VE: /* Intel only */
1701 return IEMXCPTCLASS_PAGE_FAULT;
1702
1703 case X86_XCPT_DF:
1704 return IEMXCPTCLASS_DOUBLE_FAULT;
1705 }
1706 return IEMXCPTCLASS_BENIGN;
1707}
1708
1709
1710/**
1711 * Evaluates how to handle an exception caused during delivery of another event
1712 * (exception / interrupt).
1713 *
1714 * @returns How to handle the recursive exception.
1715 * @param pVCpu The cross context virtual CPU structure of the
1716 * calling thread.
1717 * @param fPrevFlags The flags of the previous event.
1718 * @param uPrevVector The vector of the previous event.
1719 * @param fCurFlags The flags of the current exception.
1720 * @param uCurVector The vector of the current exception.
1721 * @param pfXcptRaiseInfo Where to store additional information about the
1722 * exception condition. Optional.
1723 */
1724VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1725 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1726{
1727 /*
1728     * Only CPU exceptions can be raised while delivering other events; software interrupt
1729 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1730 */
1731 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1732 Assert(pVCpu); RT_NOREF(pVCpu);
1733 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1734
1735 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1736 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1737 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1738 {
1739 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1740 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1741 {
1742 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1743 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1744 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1745 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1746 {
1747 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1748 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1749 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1750 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1751 uCurVector, pVCpu->cpum.GstCtx.cr2));
1752 }
1753 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1754 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1755 {
1756 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1757 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1758 }
1759 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1760 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1761 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1762 {
1763 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1764 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1765 }
1766 }
1767 else
1768 {
1769 if (uPrevVector == X86_XCPT_NMI)
1770 {
1771 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1772 if (uCurVector == X86_XCPT_PF)
1773 {
1774 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1775 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1776 }
1777 }
1778 else if ( uPrevVector == X86_XCPT_AC
1779 && uCurVector == X86_XCPT_AC)
1780 {
1781 enmRaise = IEMXCPTRAISE_CPU_HANG;
1782 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1783 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1784 }
1785 }
1786 }
1787 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1788 {
1789 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1790 if (uCurVector == X86_XCPT_PF)
1791 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1792 }
1793 else
1794 {
1795 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1796 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1797 }
1798
1799 if (pfXcptRaiseInfo)
1800 *pfXcptRaiseInfo = fRaiseInfo;
1801 return enmRaise;
1802}
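
/* A compact standalone restatement of the benign/contributory/page-fault matrix the
   function above applies (Intel SDM "Conditions for Generating a Double Fault"); it
   leaves out the NMI and recursive #AC special cases handled above. The Example*
   names are made up for illustration and are not IEM APIs. */
#if 0 /* illustrative sketch, not compiled */
typedef enum { EXAMPLE_XCPT_BENIGN, EXAMPLE_XCPT_CONTRIBUTORY, EXAMPLE_XCPT_PAGE_FAULT, EXAMPLE_XCPT_DOUBLE_FAULT } EXAMPLEXCPTCLASS;
typedef enum { EXAMPLE_RAISE_CURRENT, EXAMPLE_RAISE_DOUBLE_FAULT, EXAMPLE_RAISE_TRIPLE_FAULT } EXAMPLERAISE;

/* PF or contributory following PF/contributory escalates to #DF; contributory or PF
   while delivering #DF is a triple fault; everything else just delivers the current
   exception. */
static EXAMPLERAISE ExampleEvaluateRecursiveXcpt(EXAMPLEXCPTCLASS enmPrev, EXAMPLEXCPTCLASS enmCur)
{
    if (   enmPrev == EXAMPLE_XCPT_PAGE_FAULT
        && (enmCur == EXAMPLE_XCPT_PAGE_FAULT || enmCur == EXAMPLE_XCPT_CONTRIBUTORY))
        return EXAMPLE_RAISE_DOUBLE_FAULT;
    if (enmPrev == EXAMPLE_XCPT_CONTRIBUTORY && enmCur == EXAMPLE_XCPT_CONTRIBUTORY)
        return EXAMPLE_RAISE_DOUBLE_FAULT;
    if (   enmPrev == EXAMPLE_XCPT_DOUBLE_FAULT
        && (enmCur == EXAMPLE_XCPT_CONTRIBUTORY || enmCur == EXAMPLE_XCPT_PAGE_FAULT))
        return EXAMPLE_RAISE_TRIPLE_FAULT;
    return EXAMPLE_RAISE_CURRENT;
}
#endif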
1803
1804
1805/**
1806 * Enters the CPU shutdown state initiated by a triple fault or other
1807 * unrecoverable conditions.
1808 *
1809 * @returns Strict VBox status code.
1810 * @param pVCpu The cross context virtual CPU structure of the
1811 * calling thread.
1812 */
1813static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1814{
1815 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1816 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1817
1818 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1819 {
1820 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1821 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1822 }
1823
1824 RT_NOREF(pVCpu);
1825 return VINF_EM_TRIPLE_FAULT;
1826}
1827
1828
1829/**
1830 * Validates a new SS segment.
1831 *
1832 * @returns VBox strict status code.
1833 * @param pVCpu The cross context virtual CPU structure of the
1834 * calling thread.
1835 * @param NewSS The new SS selector.
1836 * @param uCpl The CPL to load the stack for.
1837 * @param pDesc Where to return the descriptor.
1838 */
1839static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1840{
1841 /* Null selectors are not allowed (we're not called for dispatching
1842 interrupts with SS=0 in long mode). */
1843 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1844 {
1845 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1846 return iemRaiseTaskSwitchFault0(pVCpu);
1847 }
1848
1849 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1850 if ((NewSS & X86_SEL_RPL) != uCpl)
1851 {
1852        Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
1853 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1854 }
1855
1856 /*
1857 * Read the descriptor.
1858 */
1859 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1860 if (rcStrict != VINF_SUCCESS)
1861 return rcStrict;
1862
1863 /*
1864 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1865 */
1866 if (!pDesc->Legacy.Gen.u1DescType)
1867 {
1868 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1869 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1870 }
1871
1872 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1873 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1874 {
1875 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1876 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1877 }
1878 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1879 {
1880        Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1881 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1882 }
1883
1884 /* Is it there? */
1885 /** @todo testcase: Is this checked before the canonical / limit check below? */
1886 if (!pDesc->Legacy.Gen.u1Present)
1887 {
1888 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1889 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1890 }
1891
1892 return VINF_SUCCESS;
1893}
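
/* A boiled-down standalone sketch of the SS checks performed above. It folds the
   distinct #TS/#NP faults into a single pass/fail answer and ignores system
   descriptors; the Example* names and parameters are made up for illustration and
   are not IEM APIs. */
#if 0 /* illustrative sketch, not compiled */
# include <stdbool.h>
# include <stdint.h>

/* A new SS must be non-null, have RPL == CPL, be a writable data segment with
   DPL == CPL, and be present. */
static bool ExampleNewSsOk(uint16_t uSel, uint8_t uCpl, uint8_t uDpl,
                           bool fCodeSeg, bool fWritable, bool fPresent)
{
    if (!(uSel & 0xfffc))       /* null selector -> #TS(0) above */
        return false;
    if ((uSel & 0x3) != uCpl)   /* RPL must match the CPL we load the stack for */
        return false;
    if (fCodeSeg || !fWritable) /* must be a writable data segment */
        return false;
    if (uDpl != uCpl)           /* DPL must match the CPL too */
        return false;
    return fPresent;            /* otherwise #NP above */
}
#endif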
1894
1895/** @} */
1896
1897
1898/** @name Raising Exceptions.
1899 *
1900 * @{
1901 */
1902
1903
1904/**
1905 * Loads the specified stack far pointer from the TSS.
1906 *
1907 * @returns VBox strict status code.
1908 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1909 * @param uCpl The CPL to load the stack for.
1910 * @param pSelSS Where to return the new stack segment.
1911 * @param puEsp Where to return the new stack pointer.
1912 */
1913static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1914{
1915 VBOXSTRICTRC rcStrict;
1916 Assert(uCpl < 4);
1917
1918 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1919 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1920 {
1921 /*
1922 * 16-bit TSS (X86TSS16).
1923 */
1924 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1925 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1926 {
1927 uint32_t off = uCpl * 4 + 2;
1928 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1929 {
1930 /** @todo check actual access pattern here. */
1931 uint32_t u32Tmp = 0; /* gcc maybe... */
1932 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1933 if (rcStrict == VINF_SUCCESS)
1934 {
1935 *puEsp = RT_LOWORD(u32Tmp);
1936 *pSelSS = RT_HIWORD(u32Tmp);
1937 return VINF_SUCCESS;
1938 }
1939 }
1940 else
1941 {
1942 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1943 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1944 }
1945 break;
1946 }
1947
1948 /*
1949 * 32-bit TSS (X86TSS32).
1950 */
1951 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1952 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1953 {
1954 uint32_t off = uCpl * 8 + 4;
1955 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1956 {
1957/** @todo check actual access pattern here. */
1958 uint64_t u64Tmp;
1959 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1960 if (rcStrict == VINF_SUCCESS)
1961 {
1962 *puEsp = u64Tmp & UINT32_MAX;
1963 *pSelSS = (RTSEL)(u64Tmp >> 32);
1964 return VINF_SUCCESS;
1965 }
1966 }
1967 else
1968 {
1969                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1970 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1971 }
1972 break;
1973 }
1974
1975 default:
1976 AssertFailed();
1977 rcStrict = VERR_IEM_IPE_4;
1978 break;
1979 }
1980
1981 *puEsp = 0; /* make gcc happy */
1982 *pSelSS = 0; /* make gcc happy */
1983 return rcStrict;
1984}
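
/* A minimal standalone sketch of the stack-slot offset arithmetic used above, based
   on the 16-bit and 32-bit TSS layouts in the Intel SDM. The Example* names are made
   up for illustration and are not IEM APIs. */
#if 0 /* illustrative sketch, not compiled */
# include <stdint.h>

/* 16-bit TSS: the ring stacks are {sp, ss} word pairs starting at offset 2,
   so CPL 0/1/2 map to offsets 2, 6 and 10. */
static uint32_t ExampleTss16StackOff(uint8_t uCpl)
{
    return (uint32_t)uCpl * 4 + 2;
}

/* 32-bit TSS: the ring stacks are {esp, ss} dword pairs starting at offset 4,
   so CPL 0/1/2 map to offsets 4, 12 and 20. */
static uint32_t ExampleTss32StackOff(uint8_t uCpl)
{
    return (uint32_t)uCpl * 8 + 4;
}
#endif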
1985
1986
1987/**
1988 * Loads the specified stack pointer from the 64-bit TSS.
1989 *
1990 * @returns VBox strict status code.
1991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1992 * @param uCpl The CPL to load the stack for.
1993 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1994 * @param puRsp Where to return the new stack pointer.
1995 */
1996static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
1997{
1998 Assert(uCpl < 4);
1999 Assert(uIst < 8);
2000 *puRsp = 0; /* make gcc happy */
2001
2002 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2003 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2004
2005 uint32_t off;
2006 if (uIst)
2007 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2008 else
2009 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2010 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2011 {
2012 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2013 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2014 }
2015
2016 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2017}
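
/* A minimal standalone sketch of the 64-bit TSS offset arithmetic used above: RSP0
   lives at offset 4 and IST1 at offset 36, each slot being 8 bytes wide. The Example
   name is made up for illustration and is not an IEM API. */
#if 0 /* illustrative sketch, not compiled */
# include <stdint.h>

/* uIst == 0 selects the RSPn slot for the given CPL, otherwise ISTn is used. */
static uint32_t ExampleTss64StackOff(uint8_t uCpl, uint8_t uIst)
{
    if (uIst)
        return (uint32_t)(uIst - 1) * 8 + 36;   /* IST1..IST7 start at offset 36 */
    return (uint32_t)uCpl * 8 + 4;              /* RSP0..RSP2 start at offset 4  */
}
#endif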
2018
2019
2020/**
2021 * Adjust the CPU state according to the exception being raised.
2022 *
2023 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2024 * @param u8Vector The exception that has been raised.
2025 */
2026DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2027{
2028 switch (u8Vector)
2029 {
2030 case X86_XCPT_DB:
2031 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2032 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2033 break;
2034 /** @todo Read the AMD and Intel exception reference... */
2035 }
2036}
2037
2038
2039/**
2040 * Implements exceptions and interrupts for real mode.
2041 *
2042 * @returns VBox strict status code.
2043 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2044 * @param cbInstr The number of bytes to offset rIP by in the return
2045 * address.
2046 * @param u8Vector The interrupt / exception vector number.
2047 * @param fFlags The flags.
2048 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2049 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2050 */
2051static VBOXSTRICTRC
2052iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2053 uint8_t cbInstr,
2054 uint8_t u8Vector,
2055 uint32_t fFlags,
2056 uint16_t uErr,
2057 uint64_t uCr2) RT_NOEXCEPT
2058{
2059 NOREF(uErr); NOREF(uCr2);
2060 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2061
2062 /*
2063 * Read the IDT entry.
2064 */
2065 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2066 {
2067 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2068 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2069 }
2070 RTFAR16 Idte;
2071 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2072 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2073 {
2074 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2075 return rcStrict;
2076 }
2077
2078 /*
2079 * Push the stack frame.
2080 */
2081 uint16_t *pu16Frame;
2082 uint64_t uNewRsp;
2083 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
2084 if (rcStrict != VINF_SUCCESS)
2085 return rcStrict;
2086
2087 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2088#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2089 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2090 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2091 fEfl |= UINT16_C(0xf000);
2092#endif
2093 pu16Frame[2] = (uint16_t)fEfl;
2094 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2095 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2096 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
2097 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2098 return rcStrict;
2099
2100 /*
2101 * Load the vector address into cs:ip and make exception specific state
2102 * adjustments.
2103 */
2104 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2105 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2106 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2107 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2108 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2109 pVCpu->cpum.GstCtx.rip = Idte.off;
2110 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2111 IEMMISC_SET_EFL(pVCpu, fEfl);
2112
2113 /** @todo do we actually do this in real mode? */
2114 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2115 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2116
2117    /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2118 so best leave them alone in case we're in a weird kind of real mode... */
2119
2120 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2121}
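
/* A minimal standalone sketch of the real-mode IVT lookup done above: each vector is
   a 4 byte offset:segment far pointer at vector * 4 from the IDT base. The Example
   name is made up for illustration and is not an IEM API. */
#if 0 /* illustrative sketch, not compiled */
# include <stdint.h>

/* pbIvt would be a flat view of guest memory starting at IDTR.base (0 after reset). */
static void ExampleRealModeVector(uint8_t const *pbIvt, uint8_t bVector, uint16_t *puSeg, uint16_t *puOff)
{
    uint32_t const off = (uint32_t)bVector * 4;
    *puOff = (uint16_t)(pbIvt[off + 0] | ((uint16_t)pbIvt[off + 1] << 8));
    *puSeg = (uint16_t)(pbIvt[off + 2] | ((uint16_t)pbIvt[off + 3] << 8));
}
#endif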
2122
2123
2124/**
2125 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2126 *
2127 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2128 * @param pSReg Pointer to the segment register.
2129 */
2130DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2131{
2132 pSReg->Sel = 0;
2133 pSReg->ValidSel = 0;
2134 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2135 {
2136 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2137 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2138 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2139 }
2140 else
2141 {
2142 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2143 /** @todo check this on AMD-V */
2144 pSReg->u64Base = 0;
2145 pSReg->u32Limit = 0;
2146 }
2147}
2148
2149
2150/**
2151 * Loads a segment selector during a task switch in V8086 mode.
2152 *
2153 * @param pSReg Pointer to the segment register.
2154 * @param uSel The selector value to load.
2155 */
2156DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2157{
2158 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2159 pSReg->Sel = uSel;
2160 pSReg->ValidSel = uSel;
2161 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2162 pSReg->u64Base = uSel << 4;
2163 pSReg->u32Limit = 0xffff;
2164 pSReg->Attr.u = 0xf3;
2165}
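
/* A minimal standalone sketch of the segment arithmetic implied by the loads above:
   in V8086 (and real) mode the hidden base is simply the selector shifted left by
   four, so sel:off addresses the linear address (sel << 4) + off within a 64 KiB
   limit. The Example name is made up for illustration and is not an IEM API. */
#if 0 /* illustrative sketch, not compiled */
# include <stdint.h>

static uint32_t ExampleV86LinearAddr(uint16_t uSel, uint16_t off)
{
    return ((uint32_t)uSel << 4) + off;
}
#endif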
2166
2167
2168/**
2169 * Loads a segment selector during a task switch in protected mode.
2170 *
2171 * In this task switch scenario, we would throw \#TS exceptions rather than
2172 * \#GPs.
2173 *
2174 * @returns VBox strict status code.
2175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2176 * @param pSReg Pointer to the segment register.
2177 * @param uSel The new selector value.
2178 *
2179 * @remarks This does _not_ handle CS or SS.
2180 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2181 */
2182static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2183{
2184 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2185
2186 /* Null data selector. */
2187 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2188 {
2189 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2190 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2191 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2192 return VINF_SUCCESS;
2193 }
2194
2195 /* Fetch the descriptor. */
2196 IEMSELDESC Desc;
2197 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2198 if (rcStrict != VINF_SUCCESS)
2199 {
2200 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2201 VBOXSTRICTRC_VAL(rcStrict)));
2202 return rcStrict;
2203 }
2204
2205 /* Must be a data segment or readable code segment. */
2206 if ( !Desc.Legacy.Gen.u1DescType
2207 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2208 {
2209 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2210 Desc.Legacy.Gen.u4Type));
2211 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2212 }
2213
2214 /* Check privileges for data segments and non-conforming code segments. */
2215 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2216 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2217 {
2218 /* The RPL and the new CPL must be less than or equal to the DPL. */
2219 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2220 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2221 {
2222 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2223 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2224 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2225 }
2226 }
2227
2228 /* Is it there? */
2229 if (!Desc.Legacy.Gen.u1Present)
2230 {
2231 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2232 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2233 }
2234
2235 /* The base and limit. */
2236 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2237 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2238
2239 /*
2240 * Ok, everything checked out fine. Now set the accessed bit before
2241 * committing the result into the registers.
2242 */
2243 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2244 {
2245 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2246 if (rcStrict != VINF_SUCCESS)
2247 return rcStrict;
2248 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2249 }
2250
2251 /* Commit */
2252 pSReg->Sel = uSel;
2253 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2254 pSReg->u32Limit = cbLimit;
2255 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2256 pSReg->ValidSel = uSel;
2257 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2258 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2259 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2260
2261 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2262 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2263 return VINF_SUCCESS;
2264}
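
/* A boiled-down standalone sketch of the privilege rule enforced above for data
   segments and non-conforming code segments: both the selector RPL and the current
   CPL must be numerically <= the descriptor DPL, while conforming code segments skip
   the check. The Example* names are made up for illustration and are not IEM APIs. */
#if 0 /* illustrative sketch, not compiled */
# include <stdbool.h>
# include <stdint.h>

static bool ExampleTaskSwitchDataSegPrivOk(uint8_t uRpl, uint8_t uCpl, uint8_t uDpl, bool fConformingCode)
{
    if (fConformingCode)
        return true;
    return uRpl <= uDpl && uCpl <= uDpl;
}
#endif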
2265
2266
2267/**
2268 * Performs a task switch.
2269 *
2270 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2271 * caller is responsible for performing the necessary checks (like DPL, TSS
2272 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2273 * reference for JMP, CALL, IRET.
2274 *
2275 * If the task switch is due to a software interrupt or hardware exception,
2276 * the caller is responsible for validating the TSS selector and descriptor. See
2277 * Intel Instruction reference for INT n.
2278 *
2279 * @returns VBox strict status code.
2280 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2281 * @param enmTaskSwitch The cause of the task switch.
2282 * @param uNextEip The EIP effective after the task switch.
2283 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2284 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2285 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2286 * @param SelTSS The TSS selector of the new task.
2287 * @param pNewDescTSS Pointer to the new TSS descriptor.
2288 */
2289VBOXSTRICTRC
2290iemTaskSwitch(PVMCPUCC pVCpu,
2291 IEMTASKSWITCH enmTaskSwitch,
2292 uint32_t uNextEip,
2293 uint32_t fFlags,
2294 uint16_t uErr,
2295 uint64_t uCr2,
2296 RTSEL SelTSS,
2297 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2298{
2299 Assert(!IEM_IS_REAL_MODE(pVCpu));
2300 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2301 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2302
2303 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2304 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2305 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2306 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2307 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2308
2309 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2310 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2311
2312 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2313 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2314
2315 /* Update CR2 in case it's a page-fault. */
2316 /** @todo This should probably be done much earlier in IEM/PGM. See
2317 * @bugref{5653#c49}. */
2318 if (fFlags & IEM_XCPT_FLAGS_CR2)
2319 pVCpu->cpum.GstCtx.cr2 = uCr2;
2320
2321 /*
2322 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2323 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2324 */
2325 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2326 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2327 if (uNewTSSLimit < uNewTSSLimitMin)
2328 {
2329 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2330 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2331 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2332 }
2333
2334 /*
2335     * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2336 * The new TSS must have been read and validated (DPL, limits etc.) before a
2337 * task-switch VM-exit commences.
2338 *
2339 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2340 */
2341 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2342 {
2343 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2344 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2345 }
2346
2347 /*
2348 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2349 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2350 */
2351 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2352 {
2353 uint32_t const uExitInfo1 = SelTSS;
2354 uint32_t uExitInfo2 = uErr;
2355 switch (enmTaskSwitch)
2356 {
2357 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2358 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2359 default: break;
2360 }
2361 if (fFlags & IEM_XCPT_FLAGS_ERR)
2362 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2363 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2364 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2365
2366 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2367 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2368 RT_NOREF2(uExitInfo1, uExitInfo2);
2369 }
2370
2371 /*
2372     * Check the current TSS limit. The last bytes written to the current TSS during the
2373     * task switch are the 2 bytes at offset 0x5C (32-bit) and at offset 0x28 (16-bit).
2374 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2375 *
2376     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2377 * end up with smaller than "legal" TSS limits.
2378 */
2379 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2380 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2381 if (uCurTSSLimit < uCurTSSLimitMin)
2382 {
2383 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2384 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2385 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2386 }
2387
2388 /*
2389 * Verify that the new TSS can be accessed and map it. Map only the required contents
2390 * and not the entire TSS.
2391 */
2392 void *pvNewTSS;
2393 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2394 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2395 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2396 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2397 * not perform correct translation if this happens. See Intel spec. 7.2.1
2398 * "Task-State Segment". */
2399 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2400 if (rcStrict != VINF_SUCCESS)
2401 {
2402 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2403 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2404 return rcStrict;
2405 }
2406
2407 /*
2408 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2409 */
2410 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2411 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2412 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2413 {
2414 PX86DESC pDescCurTSS;
2415 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2416 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2417 if (rcStrict != VINF_SUCCESS)
2418 {
2419 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2420 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2421 return rcStrict;
2422 }
2423
2424 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2425 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2426 if (rcStrict != VINF_SUCCESS)
2427 {
2428 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2429 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2430 return rcStrict;
2431 }
2432
2433 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2434 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2435 {
2436 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2437 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2438 fEFlags &= ~X86_EFL_NT;
2439 }
2440 }
2441
2442 /*
2443 * Save the CPU state into the current TSS.
2444 */
2445 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2446 if (GCPtrNewTSS == GCPtrCurTSS)
2447 {
2448 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2449 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2450 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2451 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2452 pVCpu->cpum.GstCtx.ldtr.Sel));
2453 }
2454 if (fIsNewTSS386)
2455 {
2456 /*
2457 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2458 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2459 */
2460 void *pvCurTSS32;
2461 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2462 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2463 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2464 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2465 if (rcStrict != VINF_SUCCESS)
2466 {
2467 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2468 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2469 return rcStrict;
2470 }
2471
2472        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2473 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2474 pCurTSS32->eip = uNextEip;
2475 pCurTSS32->eflags = fEFlags;
2476 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2477 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2478 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2479 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2480 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2481 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2482 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2483 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2484 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2485 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2486 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2487 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2488 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2489 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2490
2491 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2492 if (rcStrict != VINF_SUCCESS)
2493 {
2494 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2495 VBOXSTRICTRC_VAL(rcStrict)));
2496 return rcStrict;
2497 }
2498 }
2499 else
2500 {
2501 /*
2502 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2503 */
2504 void *pvCurTSS16;
2505 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2506 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2507 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2508 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2509 if (rcStrict != VINF_SUCCESS)
2510 {
2511 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2512 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2513 return rcStrict;
2514 }
2515
2516        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2517 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2518 pCurTSS16->ip = uNextEip;
2519 pCurTSS16->flags = (uint16_t)fEFlags;
2520 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2521 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2522 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2523 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2524 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2525 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2526 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2527 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2528 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2529 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2530 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2531 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2532
2533 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2534 if (rcStrict != VINF_SUCCESS)
2535 {
2536 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2537 VBOXSTRICTRC_VAL(rcStrict)));
2538 return rcStrict;
2539 }
2540 }
2541
2542 /*
2543 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2544 */
2545 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2546 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2547 {
2548 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2549 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2550 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2551 }
2552
2553 /*
2554 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2555 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2556 */
2557 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2558 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2559 bool fNewDebugTrap;
2560 if (fIsNewTSS386)
2561 {
2562 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2563 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2564 uNewEip = pNewTSS32->eip;
2565 uNewEflags = pNewTSS32->eflags;
2566 uNewEax = pNewTSS32->eax;
2567 uNewEcx = pNewTSS32->ecx;
2568 uNewEdx = pNewTSS32->edx;
2569 uNewEbx = pNewTSS32->ebx;
2570 uNewEsp = pNewTSS32->esp;
2571 uNewEbp = pNewTSS32->ebp;
2572 uNewEsi = pNewTSS32->esi;
2573 uNewEdi = pNewTSS32->edi;
2574 uNewES = pNewTSS32->es;
2575 uNewCS = pNewTSS32->cs;
2576 uNewSS = pNewTSS32->ss;
2577 uNewDS = pNewTSS32->ds;
2578 uNewFS = pNewTSS32->fs;
2579 uNewGS = pNewTSS32->gs;
2580 uNewLdt = pNewTSS32->selLdt;
2581 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2582 }
2583 else
2584 {
2585 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2586 uNewCr3 = 0;
2587 uNewEip = pNewTSS16->ip;
2588 uNewEflags = pNewTSS16->flags;
2589 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2590 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2591 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2592 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2593 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2594 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2595 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2596 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2597 uNewES = pNewTSS16->es;
2598 uNewCS = pNewTSS16->cs;
2599 uNewSS = pNewTSS16->ss;
2600 uNewDS = pNewTSS16->ds;
2601 uNewFS = 0;
2602 uNewGS = 0;
2603 uNewLdt = pNewTSS16->selLdt;
2604 fNewDebugTrap = false;
2605 }
2606
2607 if (GCPtrNewTSS == GCPtrCurTSS)
2608 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2609 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2610
2611 /*
2612 * We're done accessing the new TSS.
2613 */
2614 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2615 if (rcStrict != VINF_SUCCESS)
2616 {
2617 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2618 return rcStrict;
2619 }
2620
2621 /*
2622 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2623 */
2624 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2625 {
2626 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2627 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2628 if (rcStrict != VINF_SUCCESS)
2629 {
2630 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2631 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2632 return rcStrict;
2633 }
2634
2635 /* Check that the descriptor indicates the new TSS is available (not busy). */
2636 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2637 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2638 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2639
2640 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2641 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2642 if (rcStrict != VINF_SUCCESS)
2643 {
2644 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2645 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2646 return rcStrict;
2647 }
2648 }
2649
2650 /*
2651     * From this point on, we're technically in the new task. Exceptions raised from here on are
2652     * deferred and handled in the context of the new task, before executing any instructions in it.
2653 */
2654 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2655 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2656 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2657 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2658 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2659 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2660 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2661
2662 /* Set the busy bit in TR. */
2663 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2664
2665 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2666 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2667 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2668 {
2669 uNewEflags |= X86_EFL_NT;
2670 }
2671
2672 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2673 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2674 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2675
2676 pVCpu->cpum.GstCtx.eip = uNewEip;
2677 pVCpu->cpum.GstCtx.eax = uNewEax;
2678 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2679 pVCpu->cpum.GstCtx.edx = uNewEdx;
2680 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2681 pVCpu->cpum.GstCtx.esp = uNewEsp;
2682 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2683 pVCpu->cpum.GstCtx.esi = uNewEsi;
2684 pVCpu->cpum.GstCtx.edi = uNewEdi;
2685
2686 uNewEflags &= X86_EFL_LIVE_MASK;
2687 uNewEflags |= X86_EFL_RA1_MASK;
2688 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2689
2690 /*
2691 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2692 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2693     * because the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
2694 */
2695 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2696 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2697
2698 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2699 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2700
2701 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2702 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2703
2704 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2705 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2706
2707 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2708 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2709
2710 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2711 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2712 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2713
2714 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2715 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2716 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2717 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2718
2719 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2720 {
2721 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2722 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2723 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2724 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2725 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2726 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2727 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2728 }
2729
2730 /*
2731 * Switch CR3 for the new task.
2732 */
2733 if ( fIsNewTSS386
2734 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2735 {
2736 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2737 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2738 AssertRCSuccessReturn(rc, rc);
2739
2740 /* Inform PGM. */
2741 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2742 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2743 AssertRCReturn(rc, rc);
2744 /* ignore informational status codes */
2745
2746 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2747 }
2748
2749 /*
2750 * Switch LDTR for the new task.
2751 */
2752 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2753 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2754 else
2755 {
2756 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2757
2758 IEMSELDESC DescNewLdt;
2759 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2760 if (rcStrict != VINF_SUCCESS)
2761 {
2762 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2763 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2764 return rcStrict;
2765 }
2766 if ( !DescNewLdt.Legacy.Gen.u1Present
2767 || DescNewLdt.Legacy.Gen.u1DescType
2768 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2769 {
2770 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2771 uNewLdt, DescNewLdt.Legacy.u));
2772 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2773 }
2774
2775 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2776 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2777 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2778 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2779 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2780 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2781 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2782 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2783 }
2784
2785 IEMSELDESC DescSS;
2786 if (IEM_IS_V86_MODE(pVCpu))
2787 {
2788 IEM_SET_CPL(pVCpu, 3);
2789 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2790 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2791 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2792 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2793 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2794 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2795
2796 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2797 DescSS.Legacy.u = 0;
2798 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2799 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2800 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2801 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2802 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2803 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2804 DescSS.Legacy.Gen.u2Dpl = 3;
2805 }
2806 else
2807 {
2808 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2809
2810 /*
2811 * Load the stack segment for the new task.
2812 */
2813 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2814 {
2815 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2816 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2817 }
2818
2819 /* Fetch the descriptor. */
2820 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2821 if (rcStrict != VINF_SUCCESS)
2822 {
2823 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2824 VBOXSTRICTRC_VAL(rcStrict)));
2825 return rcStrict;
2826 }
2827
2828 /* SS must be a data segment and writable. */
2829 if ( !DescSS.Legacy.Gen.u1DescType
2830 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2831 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2832 {
2833 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2834 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2835 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2836 }
2837
2838 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2839 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2840 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2841 {
2842 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2843 uNewCpl));
2844 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2845 }
2846
2847 /* Is it there? */
2848 if (!DescSS.Legacy.Gen.u1Present)
2849 {
2850 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2851 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2852 }
2853
2854 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2855 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2856
2857 /* Set the accessed bit before committing the result into SS. */
2858 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2859 {
2860 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2861 if (rcStrict != VINF_SUCCESS)
2862 return rcStrict;
2863 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2864 }
2865
2866 /* Commit SS. */
2867 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2868 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2869 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2870 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2871 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2872 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2873 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2874
2875 /* CPL has changed, update IEM before loading rest of segments. */
2876 IEM_SET_CPL(pVCpu, uNewCpl);
2877
2878 /*
2879 * Load the data segments for the new task.
2880 */
2881 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2882 if (rcStrict != VINF_SUCCESS)
2883 return rcStrict;
2884 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2885 if (rcStrict != VINF_SUCCESS)
2886 return rcStrict;
2887 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2888 if (rcStrict != VINF_SUCCESS)
2889 return rcStrict;
2890 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2891 if (rcStrict != VINF_SUCCESS)
2892 return rcStrict;
2893
2894 /*
2895 * Load the code segment for the new task.
2896 */
2897 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2898 {
2899 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2900 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2901 }
2902
2903 /* Fetch the descriptor. */
2904 IEMSELDESC DescCS;
2905 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2906 if (rcStrict != VINF_SUCCESS)
2907 {
2908 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2909 return rcStrict;
2910 }
2911
2912 /* CS must be a code segment. */
2913 if ( !DescCS.Legacy.Gen.u1DescType
2914 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2915 {
2916 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2917 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2918 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2919 }
2920
2921 /* For conforming CS, DPL must be less than or equal to the RPL. */
2922 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2923 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2924 {
2925            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2926 DescCS.Legacy.Gen.u2Dpl));
2927 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2928 }
2929
2930 /* For non-conforming CS, DPL must match RPL. */
2931 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2932 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2933 {
2934            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2935 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2936 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2937 }
2938
2939 /* Is it there? */
2940 if (!DescCS.Legacy.Gen.u1Present)
2941 {
2942 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2943 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2944 }
2945
2946 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2947 u64Base = X86DESC_BASE(&DescCS.Legacy);
2948
2949 /* Set the accessed bit before committing the result into CS. */
2950 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2951 {
2952 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2953 if (rcStrict != VINF_SUCCESS)
2954 return rcStrict;
2955 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2956 }
2957
2958 /* Commit CS. */
2959 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2960 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2961 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2962 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2963 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2964 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2965 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2966 }
2967
2968 /* Make sure the CPU mode is correct. */
2969 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
2970 if (fExecNew != pVCpu->iem.s.fExec)
2971 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
2972 pVCpu->iem.s.fExec = fExecNew;
2973
2974 /** @todo Debug trap. */
2975 if (fIsNewTSS386 && fNewDebugTrap)
2976 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2977
2978 /*
2979 * Construct the error code masks based on what caused this task switch.
2980 * See Intel Instruction reference for INT.
2981 */
2982 uint16_t uExt;
2983 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2984 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2985 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2986 uExt = 1;
2987 else
2988 uExt = 0;
2989
2990 /*
2991 * Push any error code on to the new stack.
2992 */
2993 if (fFlags & IEM_XCPT_FLAGS_ERR)
2994 {
2995 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
2996 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2997 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
2998
2999 /* Check that there is sufficient space on the stack. */
3000 /** @todo Factor out segment limit checking for normal/expand down segments
3001 * into a separate function. */
3002 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3003 {
3004 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3005 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3006 {
3007 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3008 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3009 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3010 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3011 }
3012 }
3013 else
3014 {
3015 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3016 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3017 {
3018 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3019 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3020 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3021 }
3022 }
3023
3024
3025 if (fIsNewTSS386)
3026 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3027 else
3028 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3029 if (rcStrict != VINF_SUCCESS)
3030 {
3031 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3032 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3033 return rcStrict;
3034 }
3035 }
3036
3037 /* Check the new EIP against the new CS limit. */
3038 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3039 {
3040 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3041 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3042 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3043 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3044 }
3045
3046 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3047 pVCpu->cpum.GstCtx.ss.Sel));
3048 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3049}
3050
3051
3052/**
3053 * Implements exceptions and interrupts for protected mode.
3054 *
3055 * @returns VBox strict status code.
3056 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3057 * @param cbInstr The number of bytes to offset rIP by in the return
3058 * address.
3059 * @param u8Vector The interrupt / exception vector number.
3060 * @param fFlags The flags.
3061 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3062 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3063 */
3064static VBOXSTRICTRC
3065iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3066 uint8_t cbInstr,
3067 uint8_t u8Vector,
3068 uint32_t fFlags,
3069 uint16_t uErr,
3070 uint64_t uCr2) RT_NOEXCEPT
3071{
3072 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3073
3074 /*
3075 * Read the IDT entry.
3076 */
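    /* Protected-mode IDT entries are 8 bytes each; make sure the whole descriptor for this vector is within the IDT limit. */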
3077 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3078 {
3079 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3080 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3081 }
3082 X86DESC Idte;
3083 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3084 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3085 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3086 {
3087 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3088 return rcStrict;
3089 }
3090 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3091 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3092 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3093 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3094
3095 /*
3096 * Check the descriptor type, DPL and such.
3097 * ASSUMES this is done in the same order as described for call-gate calls.
3098 */
3099 if (Idte.Gate.u1DescType)
3100 {
3101 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3102 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3103 }
3104 bool fTaskGate = false;
3105 uint8_t f32BitGate = true;
3106 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
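    /* Interrupt gates additionally clear IF (added to fEflToClear in the switch below); trap gates leave IF untouched.
       286 gates imply 16-bit offsets and stack frames, 386 gates 32-bit ones. */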
3107 switch (Idte.Gate.u4Type)
3108 {
3109 case X86_SEL_TYPE_SYS_UNDEFINED:
3110 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3111 case X86_SEL_TYPE_SYS_LDT:
3112 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3113 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3114 case X86_SEL_TYPE_SYS_UNDEFINED2:
3115 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3116 case X86_SEL_TYPE_SYS_UNDEFINED3:
3117 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3118 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3119 case X86_SEL_TYPE_SYS_UNDEFINED4:
3120 {
3121 /** @todo check what actually happens when the type is wrong...
3122 * esp. call gates. */
3123 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3124 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3125 }
3126
3127 case X86_SEL_TYPE_SYS_286_INT_GATE:
3128 f32BitGate = false;
3129 RT_FALL_THRU();
3130 case X86_SEL_TYPE_SYS_386_INT_GATE:
3131 fEflToClear |= X86_EFL_IF;
3132 break;
3133
3134 case X86_SEL_TYPE_SYS_TASK_GATE:
3135 fTaskGate = true;
3136#ifndef IEM_IMPLEMENTS_TASKSWITCH
3137 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3138#endif
3139 break;
3140
3141 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3142 f32BitGate = false;
 RT_FALL_THRU();
3143 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3144 break;
3145
3146 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3147 }
3148
3149 /* Check DPL against CPL if applicable. */
3150 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3151 {
3152 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3153 {
3154 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3155 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3156 }
3157 }
3158
3159 /* Is it there? */
3160 if (!Idte.Gate.u1Present)
3161 {
3162 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3163 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3164 }
3165
3166 /* Is it a task-gate? */
3167 if (fTaskGate)
3168 {
3169 /*
3170 * Construct the error code masks based on what caused this task switch.
3171 * See Intel Instruction reference for INT.
3172 */
3173 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3174 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3175 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3176 RTSEL SelTSS = Idte.Gate.u16Sel;
3177
3178 /*
3179 * Fetch the TSS descriptor in the GDT.
3180 */
3181 IEMSELDESC DescTSS;
3182 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3183 if (rcStrict != VINF_SUCCESS)
3184 {
3185 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3186 VBOXSTRICTRC_VAL(rcStrict)));
3187 return rcStrict;
3188 }
3189
3190 /* The TSS descriptor must be a system segment and be available (not busy). */
3191 if ( DescTSS.Legacy.Gen.u1DescType
3192 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3193 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3194 {
3195 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3196 u8Vector, SelTSS, DescTSS.Legacy.au64));
3197 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3198 }
3199
3200 /* The TSS must be present. */
3201 if (!DescTSS.Legacy.Gen.u1Present)
3202 {
3203 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3204 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3205 }
3206
3207 /* Do the actual task switch. */
3208 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3209 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3210 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3211 }
3212
3213 /* A null CS is bad. */
3214 RTSEL NewCS = Idte.Gate.u16Sel;
3215 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3216 {
3217 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3218 return iemRaiseGeneralProtectionFault0(pVCpu);
3219 }
3220
3221 /* Fetch the descriptor for the new CS. */
3222 IEMSELDESC DescCS;
3223 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3224 if (rcStrict != VINF_SUCCESS)
3225 {
3226 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3227 return rcStrict;
3228 }
3229
3230 /* Must be a code segment. */
3231 if (!DescCS.Legacy.Gen.u1DescType)
3232 {
3233 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3234 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3235 }
3236 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3237 {
3238 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3239 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3240 }
3241
3242 /* Don't allow lowering the privilege level. */
3243 /** @todo Does the lowering of privileges apply to software interrupts
3244 * only? This has bearings on the more-privileged or
3245 * same-privilege stack behavior further down. A testcase would
3246 * be nice. */
3247 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3248 {
3249 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3250 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3251 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3252 }
3253
3254 /* Make sure the selector is present. */
3255 if (!DescCS.Legacy.Gen.u1Present)
3256 {
3257 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3258 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3259 }
3260
3261#ifdef LOG_ENABLED
3262 /* If software interrupt, try decode it if logging is enabled and such. */
3263 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3264 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3265 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3266#endif
3267
3268 /* Check the new EIP against the new CS limit. */
3269 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3270 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3271 ? Idte.Gate.u16OffsetLow
3272 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3273 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3274 if (uNewEip > cbLimitCS)
3275 {
3276 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3277 u8Vector, uNewEip, cbLimitCS, NewCS));
3278 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3279 }
3280 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3281
3282 /* Calc the flag image to push. */
3283 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3284 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3285 fEfl &= ~X86_EFL_RF;
3286 else
3287 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3288
3289 /* From V8086 mode only go to CPL 0. */
3290 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3291 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3292 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3293 {
3294 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3295 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3296 }
3297
3298 /*
3299 * If the privilege level changes, we need to get a new stack from the TSS.
3300 * This in turn means validating the new SS and ESP...
3301 */
3302 if (uNewCpl != IEM_GET_CPL(pVCpu))
3303 {
3304 RTSEL NewSS;
3305 uint32_t uNewEsp;
3306 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3307 if (rcStrict != VINF_SUCCESS)
3308 return rcStrict;
3309
3310 IEMSELDESC DescSS;
3311 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3312 if (rcStrict != VINF_SUCCESS)
3313 return rcStrict;
3314 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3315 if (!DescSS.Legacy.Gen.u1DefBig)
3316 {
3317 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3318 uNewEsp = (uint16_t)uNewEsp;
3319 }
3320
3321 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3322
3323 /* Check that there is sufficient space for the stack frame. */
3324 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
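        /* Frame: [error code,] EIP, CS, EFLAGS, ESP and SS; a V8086 source additionally pushes ES, DS, FS and GS.
           Entries are 2 bytes each for a 16-bit gate and doubled for a 32-bit gate. */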
3325 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3326 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3327 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3328
3329 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3330 {
3331 if ( uNewEsp - 1 > cbLimitSS
3332 || uNewEsp < cbStackFrame)
3333 {
3334 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3335 u8Vector, NewSS, uNewEsp, cbStackFrame));
3336 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3337 }
3338 }
3339 else
3340 {
3341 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3342 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3343 {
3344 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3345 u8Vector, NewSS, uNewEsp, cbStackFrame));
3346 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3347 }
3348 }
3349
3350 /*
3351 * Start making changes.
3352 */
3353
3354 /* Set the new CPL so that stack accesses use it. */
3355 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3356 IEM_SET_CPL(pVCpu, uNewCpl);
3357
3358 /* Create the stack frame. */
3359 RTPTRUNION uStackFrame;
3360 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3361 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3362 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3363 if (rcStrict != VINF_SUCCESS)
3364 return rcStrict;
3365 void * const pvStackFrame = uStackFrame.pv;
3366 if (f32BitGate)
3367 {
3368 if (fFlags & IEM_XCPT_FLAGS_ERR)
3369 *uStackFrame.pu32++ = uErr;
3370 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3371 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3372 uStackFrame.pu32[2] = fEfl;
3373 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3374 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3375 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3376 if (fEfl & X86_EFL_VM)
3377 {
3378 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3379 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3380 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3381 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3382 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3383 }
3384 }
3385 else
3386 {
3387 if (fFlags & IEM_XCPT_FLAGS_ERR)
3388 *uStackFrame.pu16++ = uErr;
3389 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3390 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3391 uStackFrame.pu16[2] = fEfl;
3392 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3393 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3394 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3395 if (fEfl & X86_EFL_VM)
3396 {
3397 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3398 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3399 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3400 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3401 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3402 }
3403 }
3404 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3405 if (rcStrict != VINF_SUCCESS)
3406 return rcStrict;
3407
3408 /* Mark the selectors 'accessed' (hope this is the correct time). */
3409 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3410 * after pushing the stack frame? (Write protect the gdt + stack to
3411 * find out.) */
3412 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3413 {
3414 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3415 if (rcStrict != VINF_SUCCESS)
3416 return rcStrict;
3417 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3418 }
3419
3420 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3421 {
3422 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3423 if (rcStrict != VINF_SUCCESS)
3424 return rcStrict;
3425 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3426 }
3427
3428 /*
3429 * Start committing the register changes (joins with the DPL=CPL branch).
3430 */
3431 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3432 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3433 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3434 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3435 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3436 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3437 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3438 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3439 * SP is loaded).
3440 * Need to check the other combinations too:
3441 * - 16-bit TSS, 32-bit handler
3442 * - 32-bit TSS, 16-bit handler */
3443 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3444 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3445 else
3446 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3447
3448 if (fEfl & X86_EFL_VM)
3449 {
3450 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3451 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3452 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3453 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3454 }
3455 }
3456 /*
3457 * Same privilege, no stack change and smaller stack frame.
3458 */
3459 else
3460 {
3461 uint64_t uNewRsp;
3462 RTPTRUNION uStackFrame;
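        /* Frame: [error code,] EIP, CS and EFLAGS; 2 bytes per entry for a 16-bit gate, doubled for a 32-bit gate. */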
3463 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3464 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3465 if (rcStrict != VINF_SUCCESS)
3466 return rcStrict;
3467 void * const pvStackFrame = uStackFrame.pv;
3468
3469 if (f32BitGate)
3470 {
3471 if (fFlags & IEM_XCPT_FLAGS_ERR)
3472 *uStackFrame.pu32++ = uErr;
3473 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3474 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3475 uStackFrame.pu32[2] = fEfl;
3476 }
3477 else
3478 {
3479 if (fFlags & IEM_XCPT_FLAGS_ERR)
3480 *uStackFrame.pu16++ = uErr;
3481 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3482 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3483 uStackFrame.pu16[2] = fEfl;
3484 }
3485 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3486 if (rcStrict != VINF_SUCCESS)
3487 return rcStrict;
3488
3489 /* Mark the CS selector as 'accessed'. */
3490 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3491 {
3492 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3493 if (rcStrict != VINF_SUCCESS)
3494 return rcStrict;
3495 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3496 }
3497
3498 /*
3499 * Start committing the register changes (joins with the other branch).
3500 */
3501 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3502 }
3503
3504 /* ... register committing continues. */
3505 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3506 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3507 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3508 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3509 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3510 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3511
3512 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3513 fEfl &= ~fEflToClear;
3514 IEMMISC_SET_EFL(pVCpu, fEfl);
3515
3516 if (fFlags & IEM_XCPT_FLAGS_CR2)
3517 pVCpu->cpum.GstCtx.cr2 = uCr2;
3518
3519 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3520 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3521
3522 /* Make sure the execution flags are correct. */
3523 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3524 if (fExecNew != pVCpu->iem.s.fExec)
3525 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3526 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3527 pVCpu->iem.s.fExec = fExecNew;
3528 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3529
3530 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3531}
3532
3533
3534/**
3535 * Implements exceptions and interrupts for long mode.
3536 *
3537 * @returns VBox strict status code.
3538 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3539 * @param cbInstr The number of bytes to offset rIP by in the return
3540 * address.
3541 * @param u8Vector The interrupt / exception vector number.
3542 * @param fFlags The flags.
3543 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3544 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3545 */
3546static VBOXSTRICTRC
3547iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3548 uint8_t cbInstr,
3549 uint8_t u8Vector,
3550 uint32_t fFlags,
3551 uint16_t uErr,
3552 uint64_t uCr2) RT_NOEXCEPT
3553{
3554 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3555
3556 /*
3557 * Read the IDT entry.
3558 */
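    /* Long-mode IDT entries are 16 bytes each, hence the vector times 16 offset. */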
3559 uint16_t offIdt = (uint16_t)u8Vector << 4;
3560 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3561 {
3562 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3563 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3564 }
3565 X86DESC64 Idte;
3566#ifdef _MSC_VER /* Shut up silly compiler warning. */
3567 Idte.au64[0] = 0;
3568 Idte.au64[1] = 0;
3569#endif
3570 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3571 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3572 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3573 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3574 {
3575 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3576 return rcStrict;
3577 }
3578 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3579 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3580 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3581
3582 /*
3583 * Check the descriptor type, DPL and such.
3584 * ASSUMES this is done in the same order as described for call-gate calls.
3585 */
3586 if (Idte.Gate.u1DescType)
3587 {
3588 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3589 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3590 }
3591 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3592 switch (Idte.Gate.u4Type)
3593 {
3594 case AMD64_SEL_TYPE_SYS_INT_GATE:
3595 fEflToClear |= X86_EFL_IF;
3596 break;
3597 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3598 break;
3599
3600 default:
3601 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3602 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3603 }
3604
3605 /* Check DPL against CPL if applicable. */
3606 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3607 {
3608 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3609 {
3610 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3611 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3612 }
3613 }
3614
3615 /* Is it there? */
3616 if (!Idte.Gate.u1Present)
3617 {
3618 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3619 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3620 }
3621
3622 /* A null CS is bad. */
3623 RTSEL NewCS = Idte.Gate.u16Sel;
3624 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3625 {
3626 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3627 return iemRaiseGeneralProtectionFault0(pVCpu);
3628 }
3629
3630 /* Fetch the descriptor for the new CS. */
3631 IEMSELDESC DescCS;
3632 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3633 if (rcStrict != VINF_SUCCESS)
3634 {
3635 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3636 return rcStrict;
3637 }
3638
3639 /* Must be a 64-bit code segment. */
3640 if (!DescCS.Long.Gen.u1DescType)
3641 {
3642 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3643 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3644 }
3645 if ( !DescCS.Long.Gen.u1Long
3646 || DescCS.Long.Gen.u1DefBig
3647 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3648 {
3649 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3650 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3651 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3652 }
3653
3654 /* Don't allow lowering the privilege level. For non-conforming CS
3655 selectors, the CS.DPL sets the privilege level the trap/interrupt
3656 handler runs at. For conforming CS selectors, the CPL remains
3657 unchanged, but the CS.DPL must be <= CPL. */
3658 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3659 * when CPU in Ring-0. Result \#GP? */
3660 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3661 {
3662 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3663 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3664 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3665 }
3666
3667
3668 /* Make sure the selector is present. */
3669 if (!DescCS.Legacy.Gen.u1Present)
3670 {
3671 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3672 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3673 }
3674
3675 /* Check that the new RIP is canonical. */
3676 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3677 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3678 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3679 if (!IEM_IS_CANONICAL(uNewRip))
3680 {
3681 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3682 return iemRaiseGeneralProtectionFault0(pVCpu);
3683 }
3684
3685 /*
3686 * If the privilege level changes or if the IST isn't zero, we need to get
3687 * a new stack from the TSS.
3688 */
3689 uint64_t uNewRsp;
3690 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3691 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3692 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3693 || Idte.Gate.u3IST != 0)
3694 {
3695 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3696 if (rcStrict != VINF_SUCCESS)
3697 return rcStrict;
3698 }
3699 else
3700 uNewRsp = pVCpu->cpum.GstCtx.rsp;
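    /* The new stack pointer is aligned down to a 16-byte boundary before the frame is pushed. */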
3701 uNewRsp &= ~(uint64_t)0xf;
3702
3703 /*
3704 * Calc the flag image to push.
3705 */
3706 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3707 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3708 fEfl &= ~X86_EFL_RF;
3709 else
3710 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3711
3712 /*
3713 * Start making changes.
3714 */
3715 /* Set the new CPL so that stack accesses use it. */
3716 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3717 IEM_SET_CPL(pVCpu, uNewCpl);
3718/** @todo Setting CPL this early seems wrong as it would affect any errors we
3719 * raise while accessing the stack and (?) GDT/LDT... */
3720
3721 /* Create the stack frame. */
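    /* Long mode always pushes SS, RSP, RFLAGS, CS and RIP (five qwords); an error code, when present, adds a sixth. */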
3722 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3723 RTPTRUNION uStackFrame;
3724 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3725 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3726 if (rcStrict != VINF_SUCCESS)
3727 return rcStrict;
3728 void * const pvStackFrame = uStackFrame.pv;
3729
3730 if (fFlags & IEM_XCPT_FLAGS_ERR)
3731 *uStackFrame.pu64++ = uErr;
3732 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3733 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3734 uStackFrame.pu64[2] = fEfl;
3735 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3736 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3737 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3738 if (rcStrict != VINF_SUCCESS)
3739 return rcStrict;
3740
3741 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3742 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3743 * after pushing the stack frame? (Write protect the gdt + stack to
3744 * find out.) */
3745 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3746 {
3747 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3748 if (rcStrict != VINF_SUCCESS)
3749 return rcStrict;
3750 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3751 }
3752
3753 /*
3754 * Start committing the register changes.
3755 */
3756 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3757 * hidden registers when interrupting 32-bit or 16-bit code! */
3758 if (uNewCpl != uOldCpl)
3759 {
3760 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3761 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3762 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3763 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3764 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3765 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3766 }
3767 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3768 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3769 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3770 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3771 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3772 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3773 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3774 pVCpu->cpum.GstCtx.rip = uNewRip;
3775
3776 fEfl &= ~fEflToClear;
3777 IEMMISC_SET_EFL(pVCpu, fEfl);
3778
3779 if (fFlags & IEM_XCPT_FLAGS_CR2)
3780 pVCpu->cpum.GstCtx.cr2 = uCr2;
3781
3782 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3783 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3784
3785 iemRecalcExecModeAndCplFlags(pVCpu);
3786
3787 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3788}
3789
3790
3791/**
3792 * Implements exceptions and interrupts.
3793 *
3794 * All exceptions and interrupts go through this function!
3795 *
3796 * @returns VBox strict status code.
3797 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3798 * @param cbInstr The number of bytes to offset rIP by in the return
3799 * address.
3800 * @param u8Vector The interrupt / exception vector number.
3801 * @param fFlags The flags.
3802 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3803 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
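 *
 * Typical usage, mirroring the iemRaiseXxx convenience wrappers further down,
 * e.g. for raising a \#GP(0):
 * @code
 *   return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP,
 *                            IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
 * @endcode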
3804 */
3805VBOXSTRICTRC
3806iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3807 uint8_t cbInstr,
3808 uint8_t u8Vector,
3809 uint32_t fFlags,
3810 uint16_t uErr,
3811 uint64_t uCr2) RT_NOEXCEPT
3812{
3813 /*
3814 * Get all the state that we might need here.
3815 */
3816 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3817 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3818
3819#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3820 /*
3821 * Flush prefetch buffer
3822 */
3823 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3824#endif
3825
3826 /*
3827 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3828 */
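    /* A software INT n executed in V8086 mode with IOPL below 3 is not dispatched via the IDT but upgraded to a
       #GP(0) right here (INT3, INTO and ICEBP do not take this path). */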
3829 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3830 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3831 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3832 | IEM_XCPT_FLAGS_BP_INSTR
3833 | IEM_XCPT_FLAGS_ICEBP_INSTR
3834 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3835 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3836 {
3837 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3838 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3839 u8Vector = X86_XCPT_GP;
3840 uErr = 0;
3841 }
3842#ifdef DBGFTRACE_ENABLED
3843 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3844 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3845 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3846#endif
3847
3848 /*
3849 * Evaluate whether NMI blocking should be in effect.
3850 * Normally, NMI blocking is in effect whenever we inject an NMI.
3851 */
3852 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3853 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3854
3855#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3856 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3857 {
3858 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3859 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3860 return rcStrict0;
3861
3862 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3863 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3864 {
3865 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3866 fBlockNmi = false;
3867 }
3868 }
3869#endif
3870
3871#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3872 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3873 {
3874 /*
3875 * If the event is being injected as part of VMRUN, it isn't subject to event
3876 * intercepts in the nested-guest. However, secondary exceptions that occur
3877 * during injection of any event -are- subject to exception intercepts.
3878 *
3879 * See AMD spec. 15.20 "Event Injection".
3880 */
3881 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3882 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3883 else
3884 {
3885 /*
3886 * Check and handle if the event being raised is intercepted.
3887 */
3888 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3889 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3890 return rcStrict0;
3891 }
3892 }
3893#endif
3894
3895 /*
3896 * Set NMI blocking if necessary.
3897 */
3898 if (fBlockNmi)
3899 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3900
3901 /*
3902 * Do recursion accounting.
3903 */
3904 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3905 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3906 if (pVCpu->iem.s.cXcptRecursions == 0)
3907 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3908 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3909 else
3910 {
3911 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3912 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3913 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3914
3915 if (pVCpu->iem.s.cXcptRecursions >= 4)
3916 {
3917#ifdef DEBUG_bird
3918 AssertFailed();
3919#endif
3920 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3921 }
3922
3923 /*
3924 * Evaluate the sequence of recurring events.
3925 */
3926 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3927 NULL /* pXcptRaiseInfo */);
3928 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3929 { /* likely */ }
3930 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3931 {
3932 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3933 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3934 u8Vector = X86_XCPT_DF;
3935 uErr = 0;
3936#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3937 /* VMX nested-guest #DF intercept needs to be checked here. */
3938 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3939 {
3940 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3941 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3942 return rcStrict0;
3943 }
3944#endif
3945 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3946 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3947 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3948 }
3949 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3950 {
3951 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3952 return iemInitiateCpuShutdown(pVCpu);
3953 }
3954 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3955 {
3956 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3957 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3958 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3959 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3960 return VERR_EM_GUEST_CPU_HANG;
3961 }
3962 else
3963 {
3964 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3965 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3966 return VERR_IEM_IPE_9;
3967 }
3968
3969 /*
3970 * The 'EXT' bit is set when an exception occurs during delivery of an external
3971 * event (such as an interrupt or an earlier exception)[1]. The privileged software
3972 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
3973 * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set[3].
3974 *
3975 * [1] - Intel spec. 6.13 "Error Code"
3976 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3977 * [3] - Intel Instruction reference for INT n.
3978 */
3979 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3980 && (fFlags & IEM_XCPT_FLAGS_ERR)
3981 && u8Vector != X86_XCPT_PF
3982 && u8Vector != X86_XCPT_DF)
3983 {
3984 uErr |= X86_TRAP_ERR_EXTERNAL;
3985 }
3986 }
3987
3988 pVCpu->iem.s.cXcptRecursions++;
3989 pVCpu->iem.s.uCurXcpt = u8Vector;
3990 pVCpu->iem.s.fCurXcpt = fFlags;
3991 pVCpu->iem.s.uCurXcptErr = uErr;
3992 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3993
3994 /*
3995 * Extensive logging.
3996 */
3997#if defined(LOG_ENABLED) && defined(IN_RING3)
3998 if (LogIs3Enabled())
3999 {
4000 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4001 PVM pVM = pVCpu->CTX_SUFF(pVM);
4002 char szRegs[4096];
4003 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4004 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4005 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4006 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4007 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4008 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4009 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4010 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4011 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4012 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4013 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4014 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4015 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4016 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4017 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4018 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4019 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4020 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4021 " efer=%016VR{efer}\n"
4022 " pat=%016VR{pat}\n"
4023 " sf_mask=%016VR{sf_mask}\n"
4024 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4025 " lstar=%016VR{lstar}\n"
4026 " star=%016VR{star} cstar=%016VR{cstar}\n"
4027 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4028 );
4029
4030 char szInstr[256];
4031 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4032 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4033 szInstr, sizeof(szInstr), NULL);
4034 Log3(("%s%s\n", szRegs, szInstr));
4035 }
4036#endif /* LOG_ENABLED */
4037
4038 /*
4039 * Stats.
4040 */
4041 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4042 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4043 else if (u8Vector <= X86_XCPT_LAST)
4044 {
4045 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4046 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4047 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
4048 }
4049
4050 /*
4051 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4052 * to ensure that a stale TLB or paging cache entry will only cause one
4053 * spurious #PF.
4054 */
4055 if ( u8Vector == X86_XCPT_PF
4056 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4057 IEMTlbInvalidatePage(pVCpu, uCr2);
4058
4059 /*
4060 * Call the mode specific worker function.
4061 */
4062 VBOXSTRICTRC rcStrict;
4063 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4064 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4065 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4066 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4067 else
4068 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4069
4070 /* Flush the prefetch buffer. */
4071#ifdef IEM_WITH_CODE_TLB
4072 pVCpu->iem.s.pbInstrBuf = NULL;
4073#else
4074 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4075#endif
4076
4077 /*
4078 * Unwind.
4079 */
4080 pVCpu->iem.s.cXcptRecursions--;
4081 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4082 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4083 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4084 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4085 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4086 return rcStrict;
4087}
4088
4089#ifdef IEM_WITH_SETJMP
4090/**
4091 * See iemRaiseXcptOrInt. Will not return.
4092 */
4093DECL_NO_RETURN(void)
4094iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4095 uint8_t cbInstr,
4096 uint8_t u8Vector,
4097 uint32_t fFlags,
4098 uint16_t uErr,
4099 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4100{
4101 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4102 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4103}
4104#endif
4105
4106
4107/** \#DE - 00. */
4108VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4109{
4110 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4111}
4112
4113
4114/** \#DB - 01.
4115 * @note This automatically clears DR7.GD. */
4116VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4117{
4118 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4119 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4120 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4121}
4122
4123
4124/** \#BR - 05. */
4125VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4126{
4127 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4128}
4129
4130
4131/** \#UD - 06. */
4132VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4133{
4134 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4135}
4136
4137
4138/** \#NM - 07. */
4139VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4140{
4141 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4142}
4143
4144
4145/** \#TS(err) - 0a. */
4146VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4147{
4148 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4149}
4150
4151
4152/** \#TS(tr) - 0a. */
4153VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4154{
4155 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4156 pVCpu->cpum.GstCtx.tr.Sel, 0);
4157}
4158
4159
4160/** \#TS(0) - 0a. */
4161VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4162{
4163 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4164 0, 0);
4165}
4166
4167
4168/** \#TS(err) - 0a. */
4169VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4170{
4171 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4172 uSel & X86_SEL_MASK_OFF_RPL, 0);
4173}
4174
4175
4176/** \#NP(err) - 0b. */
4177VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4178{
4179 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4180}
4181
4182
4183/** \#NP(sel) - 0b. */
4184VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4185{
4186 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4187 uSel & ~X86_SEL_RPL, 0);
4188}
4189
4190
4191/** \#SS(seg) - 0c. */
4192VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4193{
4194 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4195 uSel & ~X86_SEL_RPL, 0);
4196}
4197
4198
4199/** \#SS(err) - 0c. */
4200VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4201{
4202 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4203}
4204
4205
4206/** \#GP(n) - 0d. */
4207VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4208{
4209 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4210}
4211
4212
4213/** \#GP(0) - 0d. */
4214VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4215{
4216 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4217}
4218
4219#ifdef IEM_WITH_SETJMP
4220/** \#GP(0) - 0d. */
4221DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4222{
4223 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4224}
4225#endif
4226
4227
4228/** \#GP(sel) - 0d. */
4229VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4230{
4231 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4232 Sel & ~X86_SEL_RPL, 0);
4233}
4234
4235
4236/** \#GP(0) - 0d. */
4237VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4238{
4239 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4240}
4241
4242
4243/** \#GP(sel) - 0d. */
4244VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4245{
4246 NOREF(iSegReg); NOREF(fAccess);
4247 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4248 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4249}
4250
4251#ifdef IEM_WITH_SETJMP
4252/** \#GP(sel) - 0d, longjmp. */
4253DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4254{
4255 NOREF(iSegReg); NOREF(fAccess);
4256 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4257 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4258}
4259#endif
4260
4261/** \#GP(sel) - 0d. */
4262VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4263{
4264 NOREF(Sel);
4265 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4266}
4267
4268#ifdef IEM_WITH_SETJMP
4269/** \#GP(sel) - 0d, longjmp. */
4270DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4271{
4272 NOREF(Sel);
4273 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4274}
4275#endif
4276
4277
4278/** \#GP(sel) - 0d. */
4279VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4280{
4281 NOREF(iSegReg); NOREF(fAccess);
4282 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4283}
4284
4285#ifdef IEM_WITH_SETJMP
4286/** \#GP(sel) - 0d, longjmp. */
4287DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4288{
4289 NOREF(iSegReg); NOREF(fAccess);
4290 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4291}
4292#endif
4293
4294
4295/** \#PF(n) - 0e. */
4296VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4297{
4298 uint16_t uErr;
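    /* Translate the paging status code and the access type into #PF error code bits: P, US, ID and RW. */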
4299 switch (rc)
4300 {
4301 case VERR_PAGE_NOT_PRESENT:
4302 case VERR_PAGE_TABLE_NOT_PRESENT:
4303 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4304 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4305 uErr = 0;
4306 break;
4307
4308 default:
4309 AssertMsgFailed(("%Rrc\n", rc));
4310 RT_FALL_THRU();
4311 case VERR_ACCESS_DENIED:
4312 uErr = X86_TRAP_PF_P;
4313 break;
4314
4315 /** @todo reserved */
4316 }
4317
4318 if (IEM_GET_CPL(pVCpu) == 3)
4319 uErr |= X86_TRAP_PF_US;
4320
4321 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4322 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4323 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4324 uErr |= X86_TRAP_PF_ID;
4325
4326#if 0 /* This is so much non-sense, really. Why was it done like that? */
4327 /* Note! RW access callers reporting a WRITE protection fault, will clear
4328 the READ flag before calling. So, read-modify-write accesses (RW)
4329 can safely be reported as READ faults. */
4330 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4331 uErr |= X86_TRAP_PF_RW;
4332#else
4333 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4334 {
4335 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4336 /// (regardless of outcome of the comparison in the latter case).
4337 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4338 uErr |= X86_TRAP_PF_RW;
4339 }
4340#endif
4341
4342 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4343 of the memory operand rather than at the start of it. (Not sure what
4344 happens if it crosses a page boundary.) The current heuristic for this
4345 is to report the #PF for the last byte if the access is more than
4346 64 bytes. This is probably not correct, but we can work that out later;
4347 the main objective for now is to get FXSAVE to work like on real hardware
4348 and make bs3-cpu-basic2 work. */
4349 if (cbAccess <= 64)
4350 { /* likely */ }
4351 else
4352 GCPtrWhere += cbAccess - 1;
4353
4354 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4355 uErr, GCPtrWhere);
4356}
4357
4358#ifdef IEM_WITH_SETJMP
4359/** \#PF(n) - 0e, longjmp. */
4360DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4361 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4362{
4363 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4364}
4365#endif
4366
4367
4368/** \#MF(0) - 10. */
4369VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4370{
4371 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4372 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4373
4374 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4375 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4376 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4377}
4378
4379
4380/** \#AC(0) - 11. */
4381VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4382{
4383 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4384}
4385
4386#ifdef IEM_WITH_SETJMP
4387/** \#AC(0) - 11, longjmp. */
4388DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4389{
4390 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4391}
4392#endif
4393
4394
4395/** \#XF(0)/\#XM(0) - 19. */
4396VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4397{
4398 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4399}
4400
4401
4402/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4403IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4404{
4405 NOREF(cbInstr);
4406 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4407}
4408
4409
4410/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4411IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4412{
4413 NOREF(cbInstr);
4414 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4415}
4416
4417
4418/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4419IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4420{
4421 NOREF(cbInstr);
4422 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4423}
4424
4425
4426/** @} */
4427
4428/** @name Common opcode decoders.
4429 * @{
4430 */
4431//#include <iprt/mem.h>
4432
4433/**
4434 * Used to add extra details about a stub case.
4435 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4436 */
4437void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4438{
4439#if defined(LOG_ENABLED) && defined(IN_RING3)
4440 PVM pVM = pVCpu->CTX_SUFF(pVM);
4441 char szRegs[4096];
4442 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4443 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4444 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4445 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4446 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4447 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4448 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4449 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4450 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4451 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4452 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4453 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4454 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4455 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4456 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4457 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4458 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4459 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4460 " efer=%016VR{efer}\n"
4461 " pat=%016VR{pat}\n"
4462 " sf_mask=%016VR{sf_mask}\n"
4463 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4464 " lstar=%016VR{lstar}\n"
4465 " star=%016VR{star} cstar=%016VR{cstar}\n"
4466 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4467 );
4468
4469 char szInstr[256];
4470 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4471 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4472 szInstr, sizeof(szInstr), NULL);
4473
4474 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4475#else
4476 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);
4477#endif
4478}
4479
4480/** @} */
4481
4482
4483
4484/** @name Register Access.
4485 * @{
4486 */
4487
4488/**
4489 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4490 *
4491 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4492 * segment limit.
4493 *
4494 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4495 * @param cbInstr Instruction size.
4496 * @param offNextInstr The offset of the next instruction.
4497 * @param enmEffOpSize Effective operand size.
4498 */
4499VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4500 IEMMODE enmEffOpSize) RT_NOEXCEPT
4501{
4502 switch (enmEffOpSize)
4503 {
4504 case IEMMODE_16BIT:
4505 {
4506 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4507 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4508 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4509 pVCpu->cpum.GstCtx.rip = uNewIp;
4510 else
4511 return iemRaiseGeneralProtectionFault0(pVCpu);
4512 break;
4513 }
4514
4515 case IEMMODE_32BIT:
4516 {
4517 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4518 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4519
4520 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4521 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4522 pVCpu->cpum.GstCtx.rip = uNewEip;
4523 else
4524 return iemRaiseGeneralProtectionFault0(pVCpu);
4525 break;
4526 }
4527
4528 case IEMMODE_64BIT:
4529 {
4530 Assert(IEM_IS_64BIT_CODE(pVCpu));
4531
4532 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4533 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4534 pVCpu->cpum.GstCtx.rip = uNewRip;
4535 else
4536 return iemRaiseGeneralProtectionFault0(pVCpu);
4537 break;
4538 }
4539
4540 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4541 }
4542
4543#ifndef IEM_WITH_CODE_TLB
4544 /* Flush the prefetch buffer. */
4545 pVCpu->iem.s.cbOpcode = cbInstr;
4546#endif
4547
4548 /*
4549 * Clear RF and finish the instruction (maybe raise #DB).
4550 */
4551 return iemRegFinishClearingRF(pVCpu);
4552}
4553
4554
4555/**
4556 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4557 *
4558 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4559 * segment limit.
4560 *
4561 * @returns Strict VBox status code.
4562 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4563 * @param cbInstr Instruction size.
4564 * @param offNextInstr The offset of the next instruction.
4565 */
4566VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4567{
4568 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4569
4570 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4571 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4572 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4573 pVCpu->cpum.GstCtx.rip = uNewIp;
4574 else
4575 return iemRaiseGeneralProtectionFault0(pVCpu);
4576
4577#ifndef IEM_WITH_CODE_TLB
4578 /* Flush the prefetch buffer. */
4579 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4580#endif
4581
4582 /*
4583 * Clear RF and finish the instruction (maybe raise #DB).
4584 */
4585 return iemRegFinishClearingRF(pVCpu);
4586}
4587
4588
4589/**
4590 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4591 *
4592 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4593 * segment limit.
4594 *
4595 * @returns Strict VBox status code.
4596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4597 * @param cbInstr Instruction size.
4598 * @param offNextInstr The offset of the next instruction.
4599 * @param enmEffOpSize Effective operand size.
4600 */
4601VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4602 IEMMODE enmEffOpSize) RT_NOEXCEPT
4603{
4604 if (enmEffOpSize == IEMMODE_32BIT)
4605 {
4606 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4607
4608 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4609 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4610 pVCpu->cpum.GstCtx.rip = uNewEip;
4611 else
4612 return iemRaiseGeneralProtectionFault0(pVCpu);
4613 }
4614 else
4615 {
4616 Assert(enmEffOpSize == IEMMODE_64BIT);
4617
4618 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4619 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4620 pVCpu->cpum.GstCtx.rip = uNewRip;
4621 else
4622 return iemRaiseGeneralProtectionFault0(pVCpu);
4623 }
4624
4625#ifndef IEM_WITH_CODE_TLB
4626 /* Flush the prefetch buffer. */
4627 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4628#endif
4629
4630 /*
4631 * Clear RF and finish the instruction (maybe raise #DB).
4632 */
4633 return iemRegFinishClearingRF(pVCpu);
4634}
4635
4636
4637/**
4638 * Performs a near jump to the specified address.
4639 *
4640 * May raise a \#GP(0) if the new IP is outside the code segment limit.
4641 *
4642 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4643 * @param uNewIp The new IP value.
4644 */
4645VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
4646{
4647 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4648 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
4649 pVCpu->cpum.GstCtx.rip = uNewIp;
4650 else
4651 return iemRaiseGeneralProtectionFault0(pVCpu);
4652 /** @todo Test 16-bit jump in 64-bit mode. */
4653
4654#ifndef IEM_WITH_CODE_TLB
4655 /* Flush the prefetch buffer. */
4656 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4657#endif
4658
4659 /*
4660 * Clear RF and finish the instruction (maybe raise #DB).
4661 */
4662 return iemRegFinishClearingRF(pVCpu);
4663}
4664
4665
4666/**
4667 * Performs a near jump to the specified address.
4668 *
4669 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
4670 *
4671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4672 * @param uNewEip The new EIP value.
4673 */
4674VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
4675{
4676 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4677 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4678
4679 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4680 pVCpu->cpum.GstCtx.rip = uNewEip;
4681 else
4682 return iemRaiseGeneralProtectionFault0(pVCpu);
4683
4684#ifndef IEM_WITH_CODE_TLB
4685 /* Flush the prefetch buffer. */
4686 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4687#endif
4688
4689 /*
4690 * Clear RF and finish the instruction (maybe raise #DB).
4691 */
4692 return iemRegFinishClearingRF(pVCpu);
4693}
4694
4695
4696/**
4697 * Performs a near jump to the specified address.
4698 *
4699 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4700 * segment limit.
4701 *
4702 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4703 * @param uNewRip The new RIP value.
4704 */
4705VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4706{
4707 Assert(IEM_IS_64BIT_CODE(pVCpu));
4708
4709 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4710 pVCpu->cpum.GstCtx.rip = uNewRip;
4711 else
4712 return iemRaiseGeneralProtectionFault0(pVCpu);
4713
4714#ifndef IEM_WITH_CODE_TLB
4715 /* Flush the prefetch buffer. */
4716 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4717#endif
4718
4719 /*
4720 * Clear RF and finish the instruction (maybe raise #DB).
4721 */
4722 return iemRegFinishClearingRF(pVCpu);
4723}
4724
4725/** @} */
4726
4727
4728/** @name FPU access and helpers.
4729 *
4730 * @{
4731 */
4732
4733/**
4734 * Updates the x87.DS and FPUDP registers.
4735 *
4736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4737 * @param pFpuCtx The FPU context.
4738 * @param iEffSeg The effective segment register.
4739 * @param GCPtrEff The effective address relative to @a iEffSeg.
4740 */
4741DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4742{
4743 RTSEL sel;
4744 switch (iEffSeg)
4745 {
4746 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4747 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4748 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4749 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4750 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4751 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4752 default:
4753 AssertMsgFailed(("%d\n", iEffSeg));
4754 sel = pVCpu->cpum.GstCtx.ds.Sel;
4755 }
4756 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4757 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4758 {
4759 pFpuCtx->DS = 0;
4760 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4761 }
4762 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4763 {
4764 pFpuCtx->DS = sel;
4765 pFpuCtx->FPUDP = GCPtrEff;
4766 }
4767 else
4768 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4769}
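/*
 * Worked example for the real/V86-mode branch above: with DS=0x0200 and
 * GCPtrEff=0x0010 the saved pointer becomes FPUDP = 0x0010 + (0x0200 << 4)
 * = 0x2010, i.e. the 20-bit linear address, while the DS field is left zero
 * because the selector has already been folded into the offset.
 */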
4770
4771
4772/**
4773 * Rotates the stack registers in the push direction.
4774 *
4775 * @param pFpuCtx The FPU context.
4776 * @remarks This is a complete waste of time, but fxsave stores the registers in
4777 * stack order.
4778 */
4779DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4780{
4781 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4782 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4783 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4784 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4785 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4786 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4787 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4788 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4789 pFpuCtx->aRegs[0].r80 = r80Tmp;
4790}
4791
4792
4793/**
4794 * Rotates the stack registers in the pop direction.
4795 *
4796 * @param pFpuCtx The FPU context.
4797 * @remarks This is a complete waste of time, but fxsave stores the registers in
4798 * stack order.
4799 */
4800DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4801{
4802 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4803 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4804 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4805 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4806 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4807 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4808 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4809 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4810 pFpuCtx->aRegs[7].r80 = r80Tmp;
4811}
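/*
 * Note on the two rotation helpers above: in the FXSAVE layout aRegs[0]
 * always mirrors ST(0), aRegs[1] mirrors ST(1), and so on, independently of
 * TOP. A push therefore shifts every register one slot up and a pop shifts
 * them one slot down, keeping the array in logical ST() order.
 */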
4812
4813
4814/**
4815 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4816 * exception prevents it.
4817 *
4818 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4819 * @param pResult The FPU operation result to push.
4820 * @param pFpuCtx The FPU context.
4821 */
4822static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4823{
4824 /* Update FSW and bail if there are pending exceptions afterwards. */
4825 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4826 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4827 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4828 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4829 {
4830 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4831 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4832 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4833 pFpuCtx->FSW = fFsw;
4834 return;
4835 }
4836
4837 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4838 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4839 {
4840 /* All is fine, push the actual value. */
4841 pFpuCtx->FTW |= RT_BIT(iNewTop);
4842 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4843 }
4844 else if (pFpuCtx->FCW & X86_FCW_IM)
4845 {
4846 /* Masked stack overflow, push QNaN. */
4847 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4848 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4849 }
4850 else
4851 {
4852 /* Raise stack overflow, don't push anything. */
4853 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4854 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4855 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4856 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4857 return;
4858 }
4859
4860 fFsw &= ~X86_FSW_TOP_MASK;
4861 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4862 pFpuCtx->FSW = fFsw;
4863
4864 iemFpuRotateStackPush(pFpuCtx);
4865 RT_NOREF(pVCpu);
4866}
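/*
 * The new TOP above is computed as (TOP + 7) & 7, a branch-free decrement
 * modulo 8: TOP=0 wraps to 7 and TOP=3 becomes 2, matching how a push moves
 * the top of the 8-register x87 stack.
 */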
4867
4868
4869/**
4870 * Stores a result in a FPU register and updates the FSW and FTW.
4871 *
4872 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4873 * @param pFpuCtx The FPU context.
4874 * @param pResult The result to store.
4875 * @param iStReg Which FPU register to store it in.
4876 */
4877static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4878{
4879 Assert(iStReg < 8);
4880 uint16_t fNewFsw = pFpuCtx->FSW;
4881 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4882 fNewFsw &= ~X86_FSW_C_MASK;
4883 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4884 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4885 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4886 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4887 pFpuCtx->FSW = fNewFsw;
4888 pFpuCtx->FTW |= RT_BIT(iReg);
4889 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4890 RT_NOREF(pVCpu);
4891}
4892
4893
4894/**
4895 * Only updates the FPU status word (FSW) with the result of the current
4896 * instruction.
4897 *
4898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4899 * @param pFpuCtx The FPU context.
4900 * @param u16FSW The FSW output of the current instruction.
4901 */
4902static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4903{
4904 uint16_t fNewFsw = pFpuCtx->FSW;
4905 fNewFsw &= ~X86_FSW_C_MASK;
4906 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4907 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4908 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4909 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4910 pFpuCtx->FSW = fNewFsw;
4911 RT_NOREF(pVCpu);
4912}
4913
4914
4915/**
4916 * Pops one item off the FPU stack if no pending exception prevents it.
4917 *
4918 * @param pFpuCtx The FPU context.
4919 */
4920static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4921{
4922 /* Check pending exceptions. */
4923 uint16_t uFSW = pFpuCtx->FSW;
4924 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4925 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4926 return;
4927
4928 /* TOP--. */
4929 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4930 uFSW &= ~X86_FSW_TOP_MASK;
4931 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4932 pFpuCtx->FSW = uFSW;
4933
4934 /* Mark the previous ST0 as empty. */
4935 iOldTop >>= X86_FSW_TOP_SHIFT;
4936 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4937
4938 /* Rotate the registers. */
4939 iemFpuRotateStackPop(pFpuCtx);
4940}
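/*
 * The TOP update above adds 9 in the TOP bit position and masks the result;
 * since 9 is congruent to 1 modulo 8 this simply increments TOP modulo 8,
 * e.g. TOP=7 wraps back to 0 after the pop.
 */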
4941
4942
4943/**
4944 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4945 *
4946 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4947 * @param pResult The FPU operation result to push.
4948 * @param uFpuOpcode The FPU opcode value.
4949 */
4950void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4951{
4952 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4953 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4954 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4955}
4956
4957
4958/**
4959 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4960 * and sets FPUDP and FPUDS.
4961 *
4962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4963 * @param pResult The FPU operation result to push.
4964 * @param iEffSeg The effective segment register.
4965 * @param GCPtrEff The effective address relative to @a iEffSeg.
4966 * @param uFpuOpcode The FPU opcode value.
4967 */
4968void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
4969 uint16_t uFpuOpcode) RT_NOEXCEPT
4970{
4971 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4972 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4973 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4974 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4975}
4976
4977
4978/**
4979 * Replace ST0 with the first value and push the second onto the FPU stack,
4980 * unless a pending exception prevents it.
4981 *
4982 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4983 * @param pResult The FPU operation result to store and push.
4984 * @param uFpuOpcode The FPU opcode value.
4985 */
4986void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4987{
4988 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4989 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4990
4991 /* Update FSW and bail if there are pending exceptions afterwards. */
4992 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4993 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4994 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4995 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4996 {
4997 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4998 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
4999 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5000 pFpuCtx->FSW = fFsw;
5001 return;
5002 }
5003
5004 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5005 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5006 {
5007 /* All is fine, push the actual value. */
5008 pFpuCtx->FTW |= RT_BIT(iNewTop);
5009 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5010 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5011 }
5012 else if (pFpuCtx->FCW & X86_FCW_IM)
5013 {
5014 /* Masked stack overflow, push QNaN. */
5015 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5016 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5017 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5018 }
5019 else
5020 {
5021 /* Raise stack overflow, don't push anything. */
5022 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5023 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5024 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5025 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5026 return;
5027 }
5028
5029 fFsw &= ~X86_FSW_TOP_MASK;
5030 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5031 pFpuCtx->FSW = fFsw;
5032
5033 iemFpuRotateStackPush(pFpuCtx);
5034}
5035
5036
5037/**
5038 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5039 * FOP.
5040 *
5041 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5042 * @param pResult The result to store.
5043 * @param iStReg Which FPU register to store it in.
5044 * @param uFpuOpcode The FPU opcode value.
5045 */
5046void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5047{
5048 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5049 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5050 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5051}
5052
5053
5054/**
5055 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5056 * FOP, and then pops the stack.
5057 *
5058 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5059 * @param pResult The result to store.
5060 * @param iStReg Which FPU register to store it in.
5061 * @param uFpuOpcode The FPU opcode value.
5062 */
5063void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5064{
5065 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5066 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5067 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5068 iemFpuMaybePopOne(pFpuCtx);
5069}
5070
5071
5072/**
5073 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5074 * FPUDP, and FPUDS.
5075 *
5076 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5077 * @param pResult The result to store.
5078 * @param iStReg Which FPU register to store it in.
5079 * @param iEffSeg The effective memory operand selector register.
5080 * @param GCPtrEff The effective memory operand offset.
5081 * @param uFpuOpcode The FPU opcode value.
5082 */
5083void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5084 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5085{
5086 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5087 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5088 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5089 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5090}
5091
5092
5093/**
5094 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5095 * FPUDP, and FPUDS, and then pops the stack.
5096 *
5097 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5098 * @param pResult The result to store.
5099 * @param iStReg Which FPU register to store it in.
5100 * @param iEffSeg The effective memory operand selector register.
5101 * @param GCPtrEff The effective memory operand offset.
5102 * @param uFpuOpcode The FPU opcode value.
5103 */
5104void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5105 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5106{
5107 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5108 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5109 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5110 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5111 iemFpuMaybePopOne(pFpuCtx);
5112}
5113
5114
5115/**
5116 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5117 *
5118 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5119 * @param uFpuOpcode The FPU opcode value.
5120 */
5121void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5122{
5123 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5124 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5125}
5126
5127
5128/**
5129 * Updates the FSW, FOP, FPUIP, and FPUCS.
5130 *
5131 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5132 * @param u16FSW The FSW from the current instruction.
5133 * @param uFpuOpcode The FPU opcode value.
5134 */
5135void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5136{
5137 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5138 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5139 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5140}
5141
5142
5143/**
5144 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5145 *
5146 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5147 * @param u16FSW The FSW from the current instruction.
5148 * @param uFpuOpcode The FPU opcode value.
5149 */
5150void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5151{
5152 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5153 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5154 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5155 iemFpuMaybePopOne(pFpuCtx);
5156}
5157
5158
5159/**
5160 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5161 *
5162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5163 * @param u16FSW The FSW from the current instruction.
5164 * @param iEffSeg The effective memory operand selector register.
5165 * @param GCPtrEff The effective memory operand offset.
5166 * @param uFpuOpcode The FPU opcode value.
5167 */
5168void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5169{
5170 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5171 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5172 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5173 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5174}
5175
5176
5177/**
5178 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5179 *
5180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5181 * @param u16FSW The FSW from the current instruction.
5182 * @param uFpuOpcode The FPU opcode value.
5183 */
5184void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5185{
5186 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5187 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5188 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5189 iemFpuMaybePopOne(pFpuCtx);
5190 iemFpuMaybePopOne(pFpuCtx);
5191}
5192
5193
5194/**
5195 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5196 *
5197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5198 * @param u16FSW The FSW from the current instruction.
5199 * @param iEffSeg The effective memory operand selector register.
5200 * @param GCPtrEff The effective memory operand offset.
5201 * @param uFpuOpcode The FPU opcode value.
5202 */
5203void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5204 uint16_t uFpuOpcode) RT_NOEXCEPT
5205{
5206 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5207 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5208 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5209 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5210 iemFpuMaybePopOne(pFpuCtx);
5211}
5212
5213
5214/**
5215 * Worker routine for raising an FPU stack underflow exception.
5216 *
5217 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5218 * @param pFpuCtx The FPU context.
5219 * @param iStReg The stack register being accessed.
5220 */
5221static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5222{
5223 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5224 if (pFpuCtx->FCW & X86_FCW_IM)
5225 {
5226 /* Masked underflow. */
5227 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5228 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5229 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5230 if (iStReg != UINT8_MAX)
5231 {
5232 pFpuCtx->FTW |= RT_BIT(iReg);
5233 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5234 }
5235 }
5236 else
5237 {
5238 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5239 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5240 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5241 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5242 }
5243 RT_NOREF(pVCpu);
5244}
5245
5246
5247/**
5248 * Raises a FPU stack underflow exception.
5249 *
5250 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5251 * @param iStReg The destination register that should be loaded
5252 * with QNaN if \#IS is not masked. Specify
5253 * UINT8_MAX if none (like for fcom).
5254 * @param uFpuOpcode The FPU opcode value.
5255 */
5256void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5257{
5258 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5259 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5260 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5261}
5262
5263
5264void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5265{
5266 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5267 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5268 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5269 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5270}
5271
5272
5273void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5274{
5275 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5276 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5277 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5278 iemFpuMaybePopOne(pFpuCtx);
5279}
5280
5281
5282void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5283 uint16_t uFpuOpcode) RT_NOEXCEPT
5284{
5285 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5286 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5287 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5288 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5289 iemFpuMaybePopOne(pFpuCtx);
5290}
5291
5292
5293void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5294{
5295 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5296 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5297 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5298 iemFpuMaybePopOne(pFpuCtx);
5299 iemFpuMaybePopOne(pFpuCtx);
5300}
5301
5302
5303void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5304{
5305 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5306 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5307
5308 if (pFpuCtx->FCW & X86_FCW_IM)
5309 {
5310 /* Masked underflow - Push QNaN. */
5311 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5312 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5313 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5314 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5315 pFpuCtx->FTW |= RT_BIT(iNewTop);
5316 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5317 iemFpuRotateStackPush(pFpuCtx);
5318 }
5319 else
5320 {
5321 /* Exception pending - don't change TOP or the register stack. */
5322 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5323 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5324 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5325 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5326 }
5327}
5328
5329
5330void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5331{
5332 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5333 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5334
5335 if (pFpuCtx->FCW & X86_FCW_IM)
5336 {
5337 /* Masked underflow - Push QNaN. */
5338 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5339 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5340 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5341 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5342 pFpuCtx->FTW |= RT_BIT(iNewTop);
5343 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5344 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5345 iemFpuRotateStackPush(pFpuCtx);
5346 }
5347 else
5348 {
5349 /* Exception pending - don't change TOP or the register stack. */
5350 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5351 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5352 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5353 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5354 }
5355}
5356
5357
5358/**
5359 * Worker routine for raising an FPU stack overflow exception on a push.
5360 *
5361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5362 * @param pFpuCtx The FPU context.
5363 */
5364static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5365{
5366 if (pFpuCtx->FCW & X86_FCW_IM)
5367 {
5368 /* Masked overflow. */
5369 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5370 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5371 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5372 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5373 pFpuCtx->FTW |= RT_BIT(iNewTop);
5374 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5375 iemFpuRotateStackPush(pFpuCtx);
5376 }
5377 else
5378 {
5379 /* Exception pending - don't change TOP or the register stack. */
5380 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5381 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5382 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5383 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5384 }
5385 RT_NOREF(pVCpu);
5386}
5387
5388
5389/**
5390 * Raises a FPU stack overflow exception on a push.
5391 *
5392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5393 * @param uFpuOpcode The FPU opcode value.
5394 */
5395void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5396{
5397 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5398 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5399 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5400}
5401
5402
5403/**
5404 * Raises a FPU stack overflow exception on a push with a memory operand.
5405 *
5406 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5407 * @param iEffSeg The effective memory operand selector register.
5408 * @param GCPtrEff The effective memory operand offset.
5409 * @param uFpuOpcode The FPU opcode value.
5410 */
5411void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5412{
5413 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5414 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5415 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5416 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5417}
5418
5419/** @} */
5420
5421
5422/** @name SSE+AVX SIMD access and helpers.
5423 *
5424 * @{
5425 */
5426/**
5427 * Stores a result in a SIMD XMM register, updates the MXCSR.
5428 *
5429 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5430 * @param pResult The result to store.
5431 * @param iXmmReg Which SIMD XMM register to store the result in.
5432 */
5433void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5434{
5435 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5436 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5437
5438 /* The result is only updated if there is no unmasked exception pending. */
5439 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5440 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5441 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5442}
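/*
 * The condition above shifts the MXCSR exception mask bits (bits 7 thru 12)
 * down so they line up with the exception flag bits (bits 0 thru 5); a
 * pending exception whose mask bit is clear then survives the AND, and the
 * destination register is deliberately left unchanged in that case.
 */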
5443
5444
5445/**
5446 * Updates the MXCSR.
5447 *
5448 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5449 * @param fMxcsr The new MXCSR value.
5450 */
5451void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5452{
5453 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5454 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5455}
5456/** @} */
5457
5458
5459/** @name Memory access.
5460 *
5461 * @{
5462 */
5463
5464
5465/**
5466 * Updates the IEMCPU::cbWritten counter if applicable.
5467 *
5468 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5469 * @param fAccess The access being accounted for.
5470 * @param cbMem The access size.
5471 */
5472DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5473{
5474 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5475 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5476 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5477}
5478
5479
5480/**
5481 * Applies the segment limit, base and attributes.
5482 *
5483 * This may raise a \#GP or \#SS.
5484 *
5485 * @returns VBox strict status code.
5486 *
5487 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5488 * @param fAccess The kind of access which is being performed.
5489 * @param iSegReg The index of the segment register to apply.
5490 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5491 * TSS, ++).
5492 * @param cbMem The access size.
5493 * @param pGCPtrMem Pointer to the guest memory address to apply
5494 * segmentation to. Input and output parameter.
5495 */
5496VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5497{
5498 if (iSegReg == UINT8_MAX)
5499 return VINF_SUCCESS;
5500
5501 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5502 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5503 switch (IEM_GET_CPU_MODE(pVCpu))
5504 {
5505 case IEMMODE_16BIT:
5506 case IEMMODE_32BIT:
5507 {
5508 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5509 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5510
5511 if ( pSel->Attr.n.u1Present
5512 && !pSel->Attr.n.u1Unusable)
5513 {
5514 Assert(pSel->Attr.n.u1DescType);
5515 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5516 {
5517 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5518 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5519 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5520
5521 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5522 {
5523 /** @todo CPL check. */
5524 }
5525
5526 /*
5527 * There are two kinds of data selectors, normal and expand down.
5528 */
5529 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5530 {
5531 if ( GCPtrFirst32 > pSel->u32Limit
5532 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5533 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5534 }
5535 else
5536 {
5537 /*
5538 * The upper boundary is defined by the B bit, not the G bit!
5539 */
5540 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5541 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5542 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5543 }
5544 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5545 }
5546 else
5547 {
5548 /*
5549 * Code selectors can usually be used to read through; writing is
5550 * only permitted in real and V8086 mode.
5551 */
5552 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5553 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5554 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5555 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5556 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5557
5558 if ( GCPtrFirst32 > pSel->u32Limit
5559 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5560 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5561
5562 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5563 {
5564 /** @todo CPL check. */
5565 }
5566
5567 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5568 }
5569 }
5570 else
5571 return iemRaiseGeneralProtectionFault0(pVCpu);
5572 return VINF_SUCCESS;
5573 }
5574
5575 case IEMMODE_64BIT:
5576 {
5577 RTGCPTR GCPtrMem = *pGCPtrMem;
5578 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5579 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5580
5581 Assert(cbMem >= 1);
5582 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5583 return VINF_SUCCESS;
5584 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5585 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5586 return iemRaiseGeneralProtectionFault0(pVCpu);
5587 }
5588
5589 default:
5590 AssertFailedReturn(VERR_IEM_IPE_7);
5591 }
5592}
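/*
 * Worked example for the expand-down branch above: with a 32-bit data
 * segment whose limit is 0x0fff and B=1, the valid offsets are 0x1000 thru
 * 0xffffffff, so an access at offset 0x0800 faults while one at 0x2000
 * passes - the inverse of the normal expand-up check.
 */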
5593
5594
5595/**
5596 * Translates a virtual address to a physical address and checks if we
5597 * can access the page as specified.
5598 *
5599 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5600 * @param GCPtrMem The virtual address.
5601 * @param cbAccess The access size, for raising \#PF correctly for
5602 * FXSAVE and such.
5603 * @param fAccess The intended access.
5604 * @param pGCPhysMem Where to return the physical address.
5605 */
5606VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5607 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5608{
5609 /** @todo Need a different PGM interface here. We're currently using
5610 * generic / REM interfaces. This won't cut it for R0. */
5611 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5612 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5613 * here. */
5614 PGMPTWALK Walk;
5615 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5616 if (RT_FAILURE(rc))
5617 {
5618 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5619 /** @todo Check unassigned memory in unpaged mode. */
5620 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5621#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5622 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5623 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5624#endif
5625 *pGCPhysMem = NIL_RTGCPHYS;
5626 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5627 }
5628
5629 /* If the page is writable and does not have the no-exec bit set, all
5630 access is allowed. Otherwise we'll have to check more carefully... */
5631 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5632 {
5633 /* Write to read only memory? */
5634 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5635 && !(Walk.fEffective & X86_PTE_RW)
5636 && ( ( IEM_GET_CPL(pVCpu) == 3
5637 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5638 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5639 {
5640 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5641 *pGCPhysMem = NIL_RTGCPHYS;
5642#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5643 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5644 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5645#endif
5646 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5647 }
5648
5649 /* Kernel memory accessed by userland? */
5650 if ( !(Walk.fEffective & X86_PTE_US)
5651 && IEM_GET_CPL(pVCpu) == 3
5652 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5653 {
5654 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5655 *pGCPhysMem = NIL_RTGCPHYS;
5656#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5657 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5658 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5659#endif
5660 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5661 }
5662
5663 /* Executing non-executable memory? */
5664 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5665 && (Walk.fEffective & X86_PTE_PAE_NX)
5666 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5667 {
5668 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5669 *pGCPhysMem = NIL_RTGCPHYS;
5670#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5671 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5672 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5673#endif
5674 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5675 VERR_ACCESS_DENIED);
5676 }
5677 }
5678
5679 /*
5680 * Set the dirty / access flags.
5681 * ASSUMES this is set when the address is translated rather than on commit...
5682 */
5683 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5684 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5685 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5686 {
5687 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5688 AssertRC(rc2);
5689 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5690 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5691 }
5692
5693 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5694 *pGCPhysMem = GCPhys;
5695 return VINF_SUCCESS;
5696}
5697
5698
5699/**
5700 * Looks up a memory mapping entry.
5701 *
5702 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5704 * @param pvMem The memory address.
5705 * @param fAccess The access flags of the mapping to look up.
5706 */
5707DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5708{
5709 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5710 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5711 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5712 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5713 return 0;
5714 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5715 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5716 return 1;
5717 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5718 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5719 return 2;
5720 return VERR_NOT_FOUND;
5721}
5722
5723
5724/**
5725 * Finds a free memmap entry when using iNextMapping doesn't work.
5726 *
5727 * @returns Memory mapping index, 1024 on failure.
5728 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5729 */
5730static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5731{
5732 /*
5733 * The easy case.
5734 */
5735 if (pVCpu->iem.s.cActiveMappings == 0)
5736 {
5737 pVCpu->iem.s.iNextMapping = 1;
5738 return 0;
5739 }
5740
5741 /* There should be enough mappings for all instructions. */
5742 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5743
5744 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5745 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5746 return i;
5747
5748 AssertFailedReturn(1024);
5749}
5750
5751
5752/**
5753 * Commits a bounce buffer that needs writing back and unmaps it.
5754 *
5755 * @returns Strict VBox status code.
5756 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5757 * @param iMemMap The index of the buffer to commit.
5758 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5759 * Always false in ring-3, obviously.
5760 */
5761static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5762{
5763 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5764 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5765#ifdef IN_RING3
5766 Assert(!fPostponeFail);
5767 RT_NOREF_PV(fPostponeFail);
5768#endif
5769
5770 /*
5771 * Do the writing.
5772 */
5773 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5774 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5775 {
5776 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5777 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5778 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5779 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5780 {
5781 /*
5782 * Carefully and efficiently dealing with access handler return
5783 * codes make this a little bloated.
5784 */
5785 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5786 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5787 pbBuf,
5788 cbFirst,
5789 PGMACCESSORIGIN_IEM);
5790 if (rcStrict == VINF_SUCCESS)
5791 {
5792 if (cbSecond)
5793 {
5794 rcStrict = PGMPhysWrite(pVM,
5795 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5796 pbBuf + cbFirst,
5797 cbSecond,
5798 PGMACCESSORIGIN_IEM);
5799 if (rcStrict == VINF_SUCCESS)
5800 { /* nothing */ }
5801 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5802 {
5803 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5804 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5805 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5806 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5807 }
5808#ifndef IN_RING3
5809 else if (fPostponeFail)
5810 {
5811 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5812 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5813 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5814 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5815 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5816 return iemSetPassUpStatus(pVCpu, rcStrict);
5817 }
5818#endif
5819 else
5820 {
5821 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5822 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5823 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5824 return rcStrict;
5825 }
5826 }
5827 }
5828 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5829 {
5830 if (!cbSecond)
5831 {
5832 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5833 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5834 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5835 }
5836 else
5837 {
5838 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5839 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5840 pbBuf + cbFirst,
5841 cbSecond,
5842 PGMACCESSORIGIN_IEM);
5843 if (rcStrict2 == VINF_SUCCESS)
5844 {
5845 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5846 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5847 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5848 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5849 }
5850 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5851 {
5852 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5853 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5854 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5855 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5856 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5857 }
5858#ifndef IN_RING3
5859 else if (fPostponeFail)
5860 {
5861 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5862 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5863 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5864 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5865 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5866 return iemSetPassUpStatus(pVCpu, rcStrict);
5867 }
5868#endif
5869 else
5870 {
5871 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5872 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5873 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5874 return rcStrict2;
5875 }
5876 }
5877 }
5878#ifndef IN_RING3
5879 else if (fPostponeFail)
5880 {
5881 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5882 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5883 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5884 if (!cbSecond)
5885 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5886 else
5887 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5888 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5889 return iemSetPassUpStatus(pVCpu, rcStrict);
5890 }
5891#endif
5892 else
5893 {
5894 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5895 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5896 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5897 return rcStrict;
5898 }
5899 }
5900 else
5901 {
5902 /*
5903 * No access handlers, much simpler.
5904 */
5905 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5906 if (RT_SUCCESS(rc))
5907 {
5908 if (cbSecond)
5909 {
5910 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5911 if (RT_SUCCESS(rc))
5912 { /* likely */ }
5913 else
5914 {
5915 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5916 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5917 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5918 return rc;
5919 }
5920 }
5921 }
5922 else
5923 {
5924 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5925 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5926 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5927 return rc;
5928 }
5929 }
5930 }
5931
5932#if defined(IEM_LOG_MEMORY_WRITES)
5933 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5934 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5935 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5936 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5937 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5938 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5939
5940 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5941 g_cbIemWrote = cbWrote;
5942 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5943#endif
5944
5945 /*
5946 * Free the mapping entry.
5947 */
5948 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5949 Assert(pVCpu->iem.s.cActiveMappings != 0);
5950 pVCpu->iem.s.cActiveMappings--;
5951 return VINF_SUCCESS;
5952}
5953
5954
5955/**
5956 * iemMemMap worker that deals with a request crossing pages.
5957 */
5958static VBOXSTRICTRC
5959iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5960{
5961 Assert(cbMem <= GUEST_PAGE_SIZE);
5962
5963 /*
5964 * Do the address translations.
5965 */
5966 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
5967 RTGCPHYS GCPhysFirst;
5968 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
5969 if (rcStrict != VINF_SUCCESS)
5970 return rcStrict;
5971 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
5972
5973 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
5974 RTGCPHYS GCPhysSecond;
5975 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5976 cbSecondPage, fAccess, &GCPhysSecond);
5977 if (rcStrict != VINF_SUCCESS)
5978 return rcStrict;
5979 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
5980 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
5981
5982 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5983
5984 /*
5985 * Read in the current memory content if it's a read, execute or partial
5986 * write access.
5987 */
5988 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5989
5990 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5991 {
5992 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5993 {
5994 /*
5995 * Must carefully deal with access handler status codes here,
5996 * makes the code a bit bloated.
5997 */
5998 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5999 if (rcStrict == VINF_SUCCESS)
6000 {
6001 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6002 if (rcStrict == VINF_SUCCESS)
6003 { /*likely */ }
6004 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6005 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6006 else
6007 {
6008                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6009 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6010 return rcStrict;
6011 }
6012 }
6013 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6014 {
6015 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6016 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6017 {
6018 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6019 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6020 }
6021 else
6022 {
6023                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6024                         GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6025 return rcStrict2;
6026 }
6027 }
6028 else
6029 {
6030                Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6031 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6032 return rcStrict;
6033 }
6034 }
6035 else
6036 {
6037 /*
6038             * No informational status codes here, much more straightforward.
6039 */
6040 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6041 if (RT_SUCCESS(rc))
6042 {
6043 Assert(rc == VINF_SUCCESS);
6044 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6045 if (RT_SUCCESS(rc))
6046 Assert(rc == VINF_SUCCESS);
6047 else
6048 {
6049                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6050 return rc;
6051 }
6052 }
6053 else
6054 {
6055                Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6056 return rc;
6057 }
6058 }
6059 }
6060#ifdef VBOX_STRICT
6061 else
6062 memset(pbBuf, 0xcc, cbMem);
6063 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6064 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6065#endif
6066 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6067
6068 /*
6069 * Commit the bounce buffer entry.
6070 */
6071 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6072 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6073 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6074 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6075 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6076 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6077 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6078 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6079 pVCpu->iem.s.cActiveMappings++;
6080
6081 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6082 *ppvMem = pbBuf;
6083 return VINF_SUCCESS;
6084}
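/*
 * Worked example (editorial illustration, not part of the original source):
 * with GUEST_PAGE_SIZE = 4096, an 8 byte access at a linear address whose page
 * offset is 0xffc splits as
 *      cbFirstPage  = 4096 - 0xffc = 4 bytes on the first page,
 *      cbSecondPage = 8 - 4        = 4 bytes on the second page,
 * matching the two iemMemPageTranslateAndCheckAccess calls above; the bounce
 * buffer then presents the two halves as one contiguous mapping to the caller.
 */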
6085
6086
6087/**
6088 * iemMemMap worker that deals with iemMemPageMap failures.
6089 */
6090static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6091 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6092{
6093 /*
6094 * Filter out conditions we can handle and the ones which shouldn't happen.
6095 */
6096 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6097 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6098 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6099 {
6100 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6101 return rcMap;
6102 }
6103 pVCpu->iem.s.cPotentialExits++;
6104
6105 /*
6106 * Read in the current memory content if it's a read, execute or partial
6107 * write access.
6108 */
6109 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6110 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6111 {
6112 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6113 memset(pbBuf, 0xff, cbMem);
6114 else
6115 {
6116 int rc;
6117 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6118 {
6119 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6120 if (rcStrict == VINF_SUCCESS)
6121 { /* nothing */ }
6122 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6123 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6124 else
6125 {
6126 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6127 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6128 return rcStrict;
6129 }
6130 }
6131 else
6132 {
6133 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6134 if (RT_SUCCESS(rc))
6135 { /* likely */ }
6136 else
6137 {
6138 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6139 GCPhysFirst, rc));
6140 return rc;
6141 }
6142 }
6143 }
6144 }
6145#ifdef VBOX_STRICT
6146 else
6147 memset(pbBuf, 0xcc, cbMem);
6148#endif
6149#ifdef VBOX_STRICT
6150 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6151 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6152#endif
6153
6154 /*
6155 * Commit the bounce buffer entry.
6156 */
6157 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6158 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6159 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6160 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6161 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6162 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6163 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6164 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6165 pVCpu->iem.s.cActiveMappings++;
6166
6167 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6168 *ppvMem = pbBuf;
6169 return VINF_SUCCESS;
6170}
6171
6172
6173
6174/**
6175 * Maps the specified guest memory for the given kind of access.
6176 *
6177 * This may use bounce buffering of the memory if it's crossing a page
6178 * boundary or if there is an access handler installed for any of it. Because
6179 * of lock prefix guarantees, we're in for some extra clutter when this
6180 * happens.
6181 *
6182 * This may raise a \#GP, \#SS, \#PF or \#AC.
6183 *
6184 * @returns VBox strict status code.
6185 *
6186 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6187 * @param ppvMem Where to return the pointer to the mapped memory.
6188 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6189 * 8, 12, 16, 32 or 512. When used by string operations
6190 * it can be up to a page.
6191 * @param iSegReg The index of the segment register to use for this
6192 * access. The base and limits are checked. Use UINT8_MAX
6193 * to indicate that no segmentation is required (for IDT,
6194 * GDT and LDT accesses).
6195 * @param GCPtrMem The address of the guest memory.
6196 * @param fAccess How the memory is being accessed. The
6197 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
6198 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
6199 * when raising exceptions.
6200 * @param uAlignCtl Alignment control:
6201 * - Bits 15:0 is the alignment mask.
6202 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6203 * IEM_MEMMAP_F_ALIGN_SSE, and
6204 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6205 * Pass zero to skip alignment.
6206 */
6207VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6208 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6209{
6210 /*
6211 * Check the input and figure out which mapping entry to use.
6212 */
6213 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6214 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6215 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6216 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6217 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6218
6219 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6220 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6221 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6222 {
6223 iMemMap = iemMemMapFindFree(pVCpu);
6224 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6225 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6226 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6227 pVCpu->iem.s.aMemMappings[2].fAccess),
6228 VERR_IEM_IPE_9);
6229 }
6230
6231 /*
6232 * Map the memory, checking that we can actually access it. If something
6233 * slightly complicated happens, fall back on bounce buffering.
6234 */
6235 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6236 if (rcStrict == VINF_SUCCESS)
6237 { /* likely */ }
6238 else
6239 return rcStrict;
6240
6241 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6242 { /* likely */ }
6243 else
6244 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6245
6246 /*
6247 * Alignment check.
6248 */
6249 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6250 { /* likelyish */ }
6251 else
6252 {
6253 /* Misaligned access. */
6254 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6255 {
6256 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6257 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6258 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6259 {
6260 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6261
6262 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6263 return iemRaiseAlignmentCheckException(pVCpu);
6264 }
6265 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6266 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6267 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6268 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6269 * that's what FXSAVE does on a 10980xe. */
6270 && iemMemAreAlignmentChecksEnabled(pVCpu))
6271 return iemRaiseAlignmentCheckException(pVCpu);
6272 else
6273 return iemRaiseGeneralProtectionFault0(pVCpu);
6274 }
6275 }
6276
6277#ifdef IEM_WITH_DATA_TLB
6278 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6279
6280 /*
6281 * Get the TLB entry for this page.
6282 */
6283 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6284 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6285 if (pTlbe->uTag == uTag)
6286 {
6287# ifdef VBOX_WITH_STATISTICS
6288 pVCpu->iem.s.DataTlb.cTlbHits++;
6289# endif
6290 }
6291 else
6292 {
6293 pVCpu->iem.s.DataTlb.cTlbMisses++;
6294 PGMPTWALK Walk;
6295 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6296 if (RT_FAILURE(rc))
6297 {
6298 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6299# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6300 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6301 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6302# endif
6303 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6304 }
6305
6306 Assert(Walk.fSucceeded);
6307 pTlbe->uTag = uTag;
6308 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6309 pTlbe->GCPhys = Walk.GCPhys;
6310 pTlbe->pbMappingR3 = NULL;
6311 }
6312
6313 /*
6314 * Check TLB page table level access flags.
6315 */
6316 /* If the page is either supervisor only or non-writable, we need to do
6317 more careful access checks. */
6318 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6319 {
6320 /* Write to read only memory? */
6321 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6322 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6323 && ( ( IEM_GET_CPL(pVCpu) == 3
6324 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6325 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6326 {
6327 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6328# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6329 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6330 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6331# endif
6332 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6333 }
6334
6335 /* Kernel memory accessed by userland? */
6336 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6337 && IEM_GET_CPL(pVCpu) == 3
6338 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6339 {
6340 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6341# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6342 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6343 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6344# endif
6345 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6346 }
6347 }
6348
6349 /*
6350 * Set the dirty / access flags.
6351 * ASSUMES this is set when the address is translated rather than on commit...
6352 */
6353 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6354 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6355 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6356 {
6357 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6358 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6359 AssertRC(rc2);
6360 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6361 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6362 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6363 }
6364
6365 /*
6366 * Look up the physical page info if necessary.
6367 */
6368 uint8_t *pbMem = NULL;
6369 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6370# ifdef IN_RING3
6371 pbMem = pTlbe->pbMappingR3;
6372# else
6373 pbMem = NULL;
6374# endif
6375 else
6376 {
6377 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6378 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6379 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6380 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6381        if (RT_LIKELY(pVCpu->iem.s.DataTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6382 { /* likely */ }
6383 else
6384 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6385 pTlbe->pbMappingR3 = NULL;
6386 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6387 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6388 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6389 &pbMem, &pTlbe->fFlagsAndPhysRev);
6390 AssertRCReturn(rc, rc);
6391# ifdef IN_RING3
6392 pTlbe->pbMappingR3 = pbMem;
6393# endif
6394 }
6395
6396 /*
6397 * Check the physical page level access and mapping.
6398 */
6399 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6400 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6401 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6402 { /* probably likely */ }
6403 else
6404 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6405 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6406 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6407 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6408 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6409 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6410
6411 if (pbMem)
6412 {
6413 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6414 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6415 fAccess |= IEM_ACCESS_NOT_LOCKED;
6416 }
6417 else
6418 {
6419 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6420 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6421 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6422 if (rcStrict != VINF_SUCCESS)
6423 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6424 }
6425
6426 void * const pvMem = pbMem;
6427
6428 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6429 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6430 if (fAccess & IEM_ACCESS_TYPE_READ)
6431 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6432
6433#else /* !IEM_WITH_DATA_TLB */
6434
6435 RTGCPHYS GCPhysFirst;
6436 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6437 if (rcStrict != VINF_SUCCESS)
6438 return rcStrict;
6439
6440 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6441 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6442 if (fAccess & IEM_ACCESS_TYPE_READ)
6443 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6444
6445 void *pvMem;
6446 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6447 if (rcStrict != VINF_SUCCESS)
6448 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6449
6450#endif /* !IEM_WITH_DATA_TLB */
6451
6452 /*
6453 * Fill in the mapping table entry.
6454 */
6455 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6456 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6457 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6458 pVCpu->iem.s.cActiveMappings += 1;
6459
6460 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6461 *ppvMem = pvMem;
6462
6463 return VINF_SUCCESS;
6464}
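/*
 * Usage sketch (editorial illustration, not part of the original source): the
 * typical map/modify/commit pattern built on iemMemMap. The local variable
 * names, the u16Value operand and the use of IEM_ACCESS_DATA_W here are
 * illustrative assumptions only.
 *
 *      uint16_t    *pu16Dst;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_DS, GCPtrMem,
 *                                        IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu16Dst = u16Value;
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
 *      }
 *      return rcStrict;
 */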
6465
6466
6467/**
6468 * Commits the guest memory if bounce buffered and unmaps it.
6469 *
6470 * @returns Strict VBox status code.
6471 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6472 * @param pvMem The mapping.
6473 * @param fAccess The kind of access.
6474 */
6475VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6476{
6477 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6478 AssertReturn(iMemMap >= 0, iMemMap);
6479
6480 /* If it's bounce buffered, we may need to write back the buffer. */
6481 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6482 {
6483 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6484 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6485 }
6486 /* Otherwise unlock it. */
6487 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6488 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6489
6490 /* Free the entry. */
6491 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6492 Assert(pVCpu->iem.s.cActiveMappings != 0);
6493 pVCpu->iem.s.cActiveMappings--;
6494 return VINF_SUCCESS;
6495}
6496
6497#ifdef IEM_WITH_SETJMP
6498
6499/**
6500 * Maps the specified guest memory for the given kind of access, longjmp on
6501 * error.
6502 *
6503 * This may use bounce buffering of the memory if it's crossing a page
6504 * boundary or if there is an access handler installed for any of it. Because
6505 * of lock prefix guarantees, we're in for some extra clutter when this
6506 * happens.
6507 *
6508 * This may raise a \#GP, \#SS, \#PF or \#AC.
6509 *
6510 * @returns Pointer to the mapped memory.
6511 *
6512 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6513 * @param cbMem The number of bytes to map. This is usually 1,
6514 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6515 * string operations it can be up to a page.
6516 * @param iSegReg The index of the segment register to use for
6517 * this access. The base and limits are checked.
6518 * Use UINT8_MAX to indicate that no segmentation
6519 * is required (for IDT, GDT and LDT accesses).
6520 * @param GCPtrMem The address of the guest memory.
6521 * @param fAccess How the memory is being accessed. The
6522 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6523 * how to map the memory, while the
6524 * IEM_ACCESS_WHAT_XXX bit is used when raising
6525 * exceptions.
6526 * @param uAlignCtl Alignment control:
6527 * - Bits 15:0 is the alignment mask.
6528 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6529 * IEM_MEMMAP_F_ALIGN_SSE, and
6530 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6531 * Pass zero to skip alignment.
6532 */
6533void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6534 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6535{
6536 /*
6537 * Check the input, check segment access and adjust address
6538 * with segment base.
6539 */
6540 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6541 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6542 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6543
6544 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6545 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6546 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6547
6548 /*
6549 * Alignment check.
6550 */
6551 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6552 { /* likelyish */ }
6553 else
6554 {
6555 /* Misaligned access. */
6556 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6557 {
6558 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6559 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6560 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6561 {
6562 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6563
6564 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6565 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6566 }
6567 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6568 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6569 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6570 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6571 * that's what FXSAVE does on a 10980xe. */
6572 && iemMemAreAlignmentChecksEnabled(pVCpu))
6573 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6574 else
6575 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6576 }
6577 }
6578
6579 /*
6580 * Figure out which mapping entry to use.
6581 */
6582 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6583 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6584 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6585 {
6586 iMemMap = iemMemMapFindFree(pVCpu);
6587 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6588 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6589 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6590 pVCpu->iem.s.aMemMappings[2].fAccess),
6591 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6592 }
6593
6594 /*
6595 * Crossing a page boundary?
6596 */
6597 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6598 { /* No (likely). */ }
6599 else
6600 {
6601 void *pvMem;
6602 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6603 if (rcStrict == VINF_SUCCESS)
6604 return pvMem;
6605 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6606 }
6607
6608#ifdef IEM_WITH_DATA_TLB
6609 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6610
6611 /*
6612 * Get the TLB entry for this page.
6613 */
6614 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6615 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6616 if (pTlbe->uTag == uTag)
6617 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6618 else
6619 {
6620 pVCpu->iem.s.DataTlb.cTlbMisses++;
6621 PGMPTWALK Walk;
6622 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6623 if (RT_FAILURE(rc))
6624 {
6625 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6626# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6627 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6628 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6629# endif
6630 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6631 }
6632
6633 Assert(Walk.fSucceeded);
6634 pTlbe->uTag = uTag;
6635 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6636 pTlbe->GCPhys = Walk.GCPhys;
6637 pTlbe->pbMappingR3 = NULL;
6638 }
6639
6640 /*
6641 * Check the flags and physical revision.
6642 */
6643 /** @todo make the caller pass these in with fAccess. */
6644 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6645 ? IEMTLBE_F_PT_NO_USER : 0;
6646 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6647 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6648 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6649 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6650 ? IEMTLBE_F_PT_NO_WRITE : 0)
6651 : 0;
6652 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6653 uint8_t *pbMem = NULL;
6654 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6655 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6656# ifdef IN_RING3
6657 pbMem = pTlbe->pbMappingR3;
6658# else
6659 pbMem = NULL;
6660# endif
6661 else
6662 {
6663 /*
6664 * Okay, something isn't quite right or needs refreshing.
6665 */
6666 /* Write to read only memory? */
6667 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6668 {
6669 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6670# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6671 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6672 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6673# endif
6674 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6675 }
6676
6677 /* Kernel memory accessed by userland? */
6678 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6679 {
6680 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6681# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6682 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6683 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6684# endif
6685 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6686 }
6687
6688 /* Set the dirty / access flags.
6689 ASSUMES this is set when the address is translated rather than on commit... */
6690 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6691 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6692 {
6693 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6694 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6695 AssertRC(rc2);
6696 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6697 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6698 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6699 }
6700
6701 /*
6702 * Check if the physical page info needs updating.
6703 */
6704 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6705# ifdef IN_RING3
6706 pbMem = pTlbe->pbMappingR3;
6707# else
6708 pbMem = NULL;
6709# endif
6710 else
6711 {
6712 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6713 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6714 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6715 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6716 pTlbe->pbMappingR3 = NULL;
6717 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6718 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6719 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6720 &pbMem, &pTlbe->fFlagsAndPhysRev);
6721 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6722# ifdef IN_RING3
6723 pTlbe->pbMappingR3 = pbMem;
6724# endif
6725 }
6726
6727 /*
6728 * Check the physical page level access and mapping.
6729 */
6730 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6731 { /* probably likely */ }
6732 else
6733 {
6734 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6735 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6736 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6737 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6738 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6739 if (rcStrict == VINF_SUCCESS)
6740 return pbMem;
6741 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6742 }
6743 }
6744 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6745
6746 if (pbMem)
6747 {
6748 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6749 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6750 fAccess |= IEM_ACCESS_NOT_LOCKED;
6751 }
6752 else
6753 {
6754 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6755 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6756 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6757 if (rcStrict == VINF_SUCCESS)
6758 return pbMem;
6759 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6760 }
6761
6762 void * const pvMem = pbMem;
6763
6764 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6765 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6766 if (fAccess & IEM_ACCESS_TYPE_READ)
6767 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6768
6769#else /* !IEM_WITH_DATA_TLB */
6770
6771
6772 RTGCPHYS GCPhysFirst;
6773 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6774 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6775 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6776
6777 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6778 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6779 if (fAccess & IEM_ACCESS_TYPE_READ)
6780 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6781
6782 void *pvMem;
6783 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6784 if (rcStrict == VINF_SUCCESS)
6785 { /* likely */ }
6786 else
6787 {
6788 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6789 if (rcStrict == VINF_SUCCESS)
6790 return pvMem;
6791 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6792 }
6793
6794#endif /* !IEM_WITH_DATA_TLB */
6795
6796 /*
6797 * Fill in the mapping table entry.
6798 */
6799 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6800 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6801 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6802 pVCpu->iem.s.cActiveMappings++;
6803
6804 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6805 return pvMem;
6806}
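/*
 * Usage sketch (editorial illustration, not part of the original source): the
 * longjmp variant drops the status code plumbing; any failure unwinds through
 * IEM_DO_LONGJMP inside the helpers, as the fetch helpers further down show.
 * A store counterpart would look roughly like this (the variable names and the
 * use of IEM_ACCESS_DATA_W are illustrative assumptions):
 *
 *      uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), X86_SREG_ES, GCPtrMem,
 *                                                   IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
 *      *pu32Dst = u32Value;
 *      iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
 */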
6807
6808
6809/**
6810 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6811 *
6812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6813 * @param pvMem The mapping.
6814 * @param fAccess The kind of access.
6815 */
6816void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
6817{
6818 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6819 AssertStmt(iMemMap >= 0, IEM_DO_LONGJMP(pVCpu, iMemMap));
6820
6821 /* If it's bounce buffered, we may need to write back the buffer. */
6822 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6823 {
6824 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6825 {
6826 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6827 if (rcStrict == VINF_SUCCESS)
6828 return;
6829 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6830 }
6831 }
6832 /* Otherwise unlock it. */
6833 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6834 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6835
6836 /* Free the entry. */
6837 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6838 Assert(pVCpu->iem.s.cActiveMappings != 0);
6839 pVCpu->iem.s.cActiveMappings--;
6840}
6841
6842#endif /* IEM_WITH_SETJMP */
6843
6844#ifndef IN_RING3
6845/**
6846 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6847 * buffer part shows trouble, it will be postponed to ring-3 (sets FF and stuff).
6848 *
6849 * Allows the instruction to be completed and retired, while the IEM user will
6850 * return to ring-3 immediately afterwards and do the postponed writes there.
6851 *
6852 * @returns VBox status code (no strict statuses). Caller must check
6853 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6855 * @param pvMem The mapping.
6856 * @param fAccess The kind of access.
6857 */
6858VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6859{
6860 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6861 AssertReturn(iMemMap >= 0, iMemMap);
6862
6863 /* If it's bounce buffered, we may need to write back the buffer. */
6864 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6865 {
6866 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6867 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6868 }
6869 /* Otherwise unlock it. */
6870 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6871 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6872
6873 /* Free the entry. */
6874 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6875 Assert(pVCpu->iem.s.cActiveMappings != 0);
6876 pVCpu->iem.s.cActiveMappings--;
6877 return VINF_SUCCESS;
6878}
6879#endif
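/*
 * Usage sketch (editorial illustration, not part of the original source): a
 * ring-0/raw-mode string instruction loop would commit each iteration with the
 * postponing variant above and stop repeating once a write got deferred, per
 * the VMCPU_FF_IEM remark in the doc comment. The surrounding loop, pvMem and
 * IEM_ACCESS_DATA_W are hypothetical here.
 *
 *      VBOXSTRICTRC rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvMem, IEM_ACCESS_DATA_W);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          break;
 * The break leaves the repeat loop so ring-3 can flush the postponed write
 * before the next iteration.
 */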
6880
6881
6882/**
6883 * Rollbacks mappings, releasing page locks and such.
6884 *
6885 * The caller shall only call this after checking cActiveMappings.
6886 *
6887 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6888 */
6889void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6890{
6891 Assert(pVCpu->iem.s.cActiveMappings > 0);
6892
6893 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6894 while (iMemMap-- > 0)
6895 {
6896 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6897 if (fAccess != IEM_ACCESS_INVALID)
6898 {
6899 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6900 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6901 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6902 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6903 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6904 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6905 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6906 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6907 pVCpu->iem.s.cActiveMappings--;
6908 }
6909 }
6910}
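/*
 * Usage sketch (editorial illustration, not part of the original source):
 * instruction dispatch code invokes the rollback only when an instruction
 * failed while mappings were still outstanding; the rcStrict variable below is
 * hypothetical.
 *
 *      if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
 *          iemMemRollback(pVCpu);
 */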
6911
6912
6913/**
6914 * Fetches a data byte.
6915 *
6916 * @returns Strict VBox status code.
6917 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6918 * @param pu8Dst Where to return the byte.
6919 * @param iSegReg The index of the segment register to use for
6920 * this access. The base and limits are checked.
6921 * @param GCPtrMem The address of the guest memory.
6922 */
6923VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6924{
6925 /* The lazy approach for now... */
6926 uint8_t const *pu8Src;
6927 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6928 if (rc == VINF_SUCCESS)
6929 {
6930 *pu8Dst = *pu8Src;
6931 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6932 }
6933 return rc;
6934}
6935
6936
6937#ifdef IEM_WITH_SETJMP
6938/**
6939 * Fetches a data byte, longjmp on error.
6940 *
6941 * @returns The byte.
6942 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6943 * @param iSegReg The index of the segment register to use for
6944 * this access. The base and limits are checked.
6945 * @param GCPtrMem The address of the guest memory.
6946 */
6947uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6948{
6949 /* The lazy approach for now... */
6950 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6951 uint8_t const bRet = *pu8Src;
6952 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6953 return bRet;
6954}
6955#endif /* IEM_WITH_SETJMP */
6956
6957
6958/**
6959 * Fetches a data word.
6960 *
6961 * @returns Strict VBox status code.
6962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6963 * @param pu16Dst Where to return the word.
6964 * @param iSegReg The index of the segment register to use for
6965 * this access. The base and limits are checked.
6966 * @param GCPtrMem The address of the guest memory.
6967 */
6968VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6969{
6970 /* The lazy approach for now... */
6971 uint16_t const *pu16Src;
6972 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
6973 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
6974 if (rc == VINF_SUCCESS)
6975 {
6976 *pu16Dst = *pu16Src;
6977 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6978 }
6979 return rc;
6980}
6981
6982
6983#ifdef IEM_WITH_SETJMP
6984/**
6985 * Fetches a data word, longjmp on error.
6986 *
6987 * @returns The word.
6988 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6989 * @param iSegReg The index of the segment register to use for
6990 * this access. The base and limits are checked.
6991 * @param GCPtrMem The address of the guest memory.
6992 */
6993uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6994{
6995 /* The lazy approach for now... */
6996 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6997 sizeof(*pu16Src) - 1);
6998 uint16_t const u16Ret = *pu16Src;
6999 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
7000 return u16Ret;
7001}
7002#endif
7003
7004
7005/**
7006 * Fetches a data dword.
7007 *
7008 * @returns Strict VBox status code.
7009 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7010 * @param pu32Dst Where to return the dword.
7011 * @param iSegReg The index of the segment register to use for
7012 * this access. The base and limits are checked.
7013 * @param GCPtrMem The address of the guest memory.
7014 */
7015VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7016{
7017 /* The lazy approach for now... */
7018 uint32_t const *pu32Src;
7019 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
7020 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7021 if (rc == VINF_SUCCESS)
7022 {
7023 *pu32Dst = *pu32Src;
7024 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7025 }
7026 return rc;
7027}
7028
7029
7030/**
7031 * Fetches a data dword and zero extends it to a qword.
7032 *
7033 * @returns Strict VBox status code.
7034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7035 * @param pu64Dst Where to return the qword.
7036 * @param iSegReg The index of the segment register to use for
7037 * this access. The base and limits are checked.
7038 * @param GCPtrMem The address of the guest memory.
7039 */
7040VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7041{
7042 /* The lazy approach for now... */
7043 uint32_t const *pu32Src;
7044 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
7045 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7046 if (rc == VINF_SUCCESS)
7047 {
7048 *pu64Dst = *pu32Src;
7049 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7050 }
7051 return rc;
7052}
7053
7054
7055#ifdef IEM_WITH_SETJMP
7056
7057/**
7058 * Fetches a data dword, longjmp on error, fallback/safe version.
7059 *
7060 * @returns The dword.
7061 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7062 * @param iSegReg The index of the segment register to use for
7063 * this access. The base and limits are checked.
7064 * @param GCPtrMem The address of the guest memory.
7065 */
7066uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7067{
7068 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7069 sizeof(*pu32Src) - 1);
7070 uint32_t const u32Ret = *pu32Src;
7071 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7072 return u32Ret;
7073}
7074
7075
7076/**
7077 * Fetches a data dword, longjmp on error.
7078 *
7079 * @returns The dword.
7080 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7081 * @param iSegReg The index of the segment register to use for
7082 * this access. The base and limits are checked.
7083 * @param GCPtrMem The address of the guest memory.
7084 */
7085uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7086{
7087# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
7088 /*
7089     * Convert from segmented to flat address and check that it doesn't cross a page boundary.
7090 */
7091 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
7092 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
7093 {
7094 /*
7095 * TLB lookup.
7096 */
7097 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
7098 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
7099 if (pTlbe->uTag == uTag)
7100 {
7101 /*
7102 * Check TLB page table level access flags.
7103 */
7104 uint64_t const fNoUser = IEM_GET_CPL(pVCpu) == 3 ? IEMTLBE_F_PT_NO_USER : 0;
7105 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
7106 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
7107 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7108 {
7109 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
7110
7111 /*
7112 * Alignment check:
7113 */
7114 /** @todo check priority \#AC vs \#PF */
7115 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
7116 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7117 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
7118 || IEM_GET_CPL(pVCpu) != 3)
7119 {
7120 /*
7121 * Fetch and return the dword
7122 */
7123 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
7124 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
7125 return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
7126 }
7127 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
7128 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7129 }
7130 }
7131 }
7132
7133    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
7134 outdated page pointer, or other troubles. */
7135 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
7136 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
7137
7138# else
7139 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
7140 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7141 uint32_t const u32Ret = *pu32Src;
7142 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7143 return u32Ret;
7144# endif
7145}
7146#endif
7147
7148
7149#ifdef SOME_UNUSED_FUNCTION
7150/**
7151 * Fetches a data dword and sign extends it to a qword.
7152 *
7153 * @returns Strict VBox status code.
7154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7155 * @param pu64Dst Where to return the sign extended value.
7156 * @param iSegReg The index of the segment register to use for
7157 * this access. The base and limits are checked.
7158 * @param GCPtrMem The address of the guest memory.
7159 */
7160VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7161{
7162 /* The lazy approach for now... */
7163 int32_t const *pi32Src;
7164 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
7165 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7166 if (rc == VINF_SUCCESS)
7167 {
7168 *pu64Dst = *pi32Src;
7169 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7170 }
7171#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7172 else
7173 *pu64Dst = 0;
7174#endif
7175 return rc;
7176}
7177#endif
7178
7179
7180/**
7181 * Fetches a data qword.
7182 *
7183 * @returns Strict VBox status code.
7184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7185 * @param pu64Dst Where to return the qword.
7186 * @param iSegReg The index of the segment register to use for
7187 * this access. The base and limits are checked.
7188 * @param GCPtrMem The address of the guest memory.
7189 */
7190VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7191{
7192 /* The lazy approach for now... */
7193 uint64_t const *pu64Src;
7194 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7195 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7196 if (rc == VINF_SUCCESS)
7197 {
7198 *pu64Dst = *pu64Src;
7199 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7200 }
7201 return rc;
7202}
7203
7204
7205#ifdef IEM_WITH_SETJMP
7206/**
7207 * Fetches a data qword, longjmp on error.
7208 *
7209 * @returns The qword.
7210 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7211 * @param iSegReg The index of the segment register to use for
7212 * this access. The base and limits are checked.
7213 * @param GCPtrMem The address of the guest memory.
7214 */
7215uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7216{
7217 /* The lazy approach for now... */
7218 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
7219 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7220 uint64_t const u64Ret = *pu64Src;
7221 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7222 return u64Ret;
7223}
7224#endif
7225
7226
7227/**
7228 * Fetches a data qword, aligned at a 16-byte boundary (for SSE).
7229 *
7230 * @returns Strict VBox status code.
7231 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7232 * @param pu64Dst Where to return the qword.
7233 * @param iSegReg The index of the segment register to use for
7234 * this access. The base and limits are checked.
7235 * @param GCPtrMem The address of the guest memory.
7236 */
7237VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7238{
7239 /* The lazy approach for now... */
7240 uint64_t const *pu64Src;
7241 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7242 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7243 if (rc == VINF_SUCCESS)
7244 {
7245 *pu64Dst = *pu64Src;
7246 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7247 }
7248 return rc;
7249}
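/*
 * Note with example (editorial illustration, derived from the iemMemMap
 * documentation above): the alignment control packs the alignment mask into
 * bits 15:0 and the policy flags into bits 31:16, so the value used by the
 * function above decodes as:
 *
 *      uint32_t const uAlignCtl  = 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE;
 *      uint16_t const fAlignMask = uAlignCtl & UINT16_MAX;
 *
 * fAlignMask is 15, i.e. a 16-byte alignment requirement. A misaligned
 * GCPtrMem (GCPtrMem & fAlignMask != 0) then raises \#GP(0), or \#AC instead
 * when MXCSR.MM is set and alignment checks are enabled, per the misalignment
 * handling in iemMemMap/iemMemMapJmp.
 */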
7250
7251
7252#ifdef IEM_WITH_SETJMP
7253/**
7254 * Fetches a data qword, aligned at a 16-byte boundary (for SSE), longjmp on error.
7255 *
7256 * @returns The qword.
7257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7258 * @param iSegReg The index of the segment register to use for
7259 * this access. The base and limits are checked.
7260 * @param GCPtrMem The address of the guest memory.
7261 */
7262uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7263{
7264 /* The lazy approach for now... */
7265 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7266 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7267 uint64_t const u64Ret = *pu64Src;
7268 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7269 return u64Ret;
7270}
7271#endif
7272
7273
7274/**
7275 * Fetches a data tword.
7276 *
7277 * @returns Strict VBox status code.
7278 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7279 * @param pr80Dst Where to return the tword.
7280 * @param iSegReg The index of the segment register to use for
7281 * this access. The base and limits are checked.
7282 * @param GCPtrMem The address of the guest memory.
7283 */
7284VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7285{
7286 /* The lazy approach for now... */
7287 PCRTFLOAT80U pr80Src;
7288 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7289 if (rc == VINF_SUCCESS)
7290 {
7291 *pr80Dst = *pr80Src;
7292 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7293 }
7294 return rc;
7295}
7296
7297
7298#ifdef IEM_WITH_SETJMP
7299/**
7300 * Fetches a data tword, longjmp on error.
7301 *
7302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7303 * @param pr80Dst Where to return the tword.
7304 * @param iSegReg The index of the segment register to use for
7305 * this access. The base and limits are checked.
7306 * @param GCPtrMem The address of the guest memory.
7307 */
7308void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7309{
7310 /* The lazy approach for now... */
7311 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7312 *pr80Dst = *pr80Src;
7313 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7314}
7315#endif
7316
7317
7318/**
7319 * Fetches a data decimal tword.
7320 *
7321 * @returns Strict VBox status code.
7322 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7323 * @param pd80Dst Where to return the tword.
7324 * @param iSegReg The index of the segment register to use for
7325 * this access. The base and limits are checked.
7326 * @param GCPtrMem The address of the guest memory.
7327 */
7328VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7329{
7330 /* The lazy approach for now... */
7331 PCRTPBCD80U pd80Src;
7332 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7333 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7334 if (rc == VINF_SUCCESS)
7335 {
7336 *pd80Dst = *pd80Src;
7337 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7338 }
7339 return rc;
7340}
7341
7342
7343#ifdef IEM_WITH_SETJMP
7344/**
7345 * Fetches a data decimal tword, longjmp on error.
7346 *
7347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7348 * @param pd80Dst Where to return the tword.
7349 * @param iSegReg The index of the segment register to use for
7350 * this access. The base and limits are checked.
7351 * @param GCPtrMem The address of the guest memory.
7352 */
7353void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7354{
7355 /* The lazy approach for now... */
7356 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7357 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7358 *pd80Dst = *pd80Src;
7359 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7360}
7361#endif
7362
7363
7364/**
7365 * Fetches a data dqword (double qword), generally SSE related.
7366 *
7367 * @returns Strict VBox status code.
7368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7369 * @param pu128Dst Where to return the dqword.
7370 * @param iSegReg The index of the segment register to use for
7371 * this access. The base and limits are checked.
7372 * @param GCPtrMem The address of the guest memory.
7373 */
7374VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7375{
7376 /* The lazy approach for now... */
7377 PCRTUINT128U pu128Src;
7378 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7379 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7380 if (rc == VINF_SUCCESS)
7381 {
7382 pu128Dst->au64[0] = pu128Src->au64[0];
7383 pu128Dst->au64[1] = pu128Src->au64[1];
7384 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7385 }
7386 return rc;
7387}
7388
7389
7390#ifdef IEM_WITH_SETJMP
7391/**
7392 * Fetches a data dqword (double qword), generally SSE related.
7393 *
7394 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7395 * @param pu128Dst Where to return the dqword.
7396 * @param iSegReg The index of the segment register to use for
7397 * this access. The base and limits are checked.
7398 * @param GCPtrMem The address of the guest memory.
7399 */
7400void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7401{
7402 /* The lazy approach for now... */
7403 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7404 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7405 pu128Dst->au64[0] = pu128Src->au64[0];
7406 pu128Dst->au64[1] = pu128Src->au64[1];
7407 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7408}
7409#endif
7410
7411
7412/**
7413 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7414 * related.
7415 *
7416 * Raises \#GP(0) if not aligned.
7417 *
7418 * @returns Strict VBox status code.
7419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7420 * @param pu128Dst Where to return the dqword.
7421 * @param iSegReg The index of the segment register to use for
7422 * this access. The base and limits are checked.
7423 * @param GCPtrMem The address of the guest memory.
7424 */
7425VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7426{
7427 /* The lazy approach for now... */
7428 PCRTUINT128U pu128Src;
7429 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7430 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7431 if (rc == VINF_SUCCESS)
7432 {
7433 pu128Dst->au64[0] = pu128Src->au64[0];
7434 pu128Dst->au64[1] = pu128Src->au64[1];
7435 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7436 }
7437 return rc;
7438}
7439
7440
7441#ifdef IEM_WITH_SETJMP
7442/**
7443 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7444 * related, longjmp on error.
7445 *
7446 * Raises \#GP(0) if not aligned.
7447 *
7448 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7449 * @param pu128Dst Where to return the dqword.
7450 * @param iSegReg The index of the segment register to use for
7451 * this access. The base and limits are checked.
7452 * @param GCPtrMem The address of the guest memory.
7453 */
7454void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7455 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7456{
7457 /* The lazy approach for now... */
7458 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7459 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7460 pu128Dst->au64[0] = pu128Src->au64[0];
7461 pu128Dst->au64[1] = pu128Src->au64[1];
7462 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7463}
7464#endif
7465
7466
7467/**
7468 * Fetches a data oword (octo word), generally AVX related.
7469 *
7470 * @returns Strict VBox status code.
7471 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7472 * @param pu256Dst Where to return the oword.
7473 * @param iSegReg The index of the segment register to use for
7474 * this access. The base and limits are checked.
7475 * @param GCPtrMem The address of the guest memory.
7476 */
7477VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7478{
7479 /* The lazy approach for now... */
7480 PCRTUINT256U pu256Src;
7481 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7482 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7483 if (rc == VINF_SUCCESS)
7484 {
7485 pu256Dst->au64[0] = pu256Src->au64[0];
7486 pu256Dst->au64[1] = pu256Src->au64[1];
7487 pu256Dst->au64[2] = pu256Src->au64[2];
7488 pu256Dst->au64[3] = pu256Src->au64[3];
7489 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7490 }
7491 return rc;
7492}
7493
7494
7495#ifdef IEM_WITH_SETJMP
7496/**
7497 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
7498 *
7499 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7500 * @param pu256Dst Where to return the oword.
7501 * @param iSegReg The index of the segment register to use for
7502 * this access. The base and limits are checked.
7503 * @param GCPtrMem The address of the guest memory.
7504 */
7505void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7506{
7507 /* The lazy approach for now... */
7508 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7509 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7510 pu256Dst->au64[0] = pu256Src->au64[0];
7511 pu256Dst->au64[1] = pu256Src->au64[1];
7512 pu256Dst->au64[2] = pu256Src->au64[2];
7513 pu256Dst->au64[3] = pu256Src->au64[3];
7514 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7515}
7516#endif
7517
7518
7519/**
7520 * Fetches a data oword (octo word) at an aligned address, generally AVX
7521 * related.
7522 *
7523 * Raises \#GP(0) if not aligned.
7524 *
7525 * @returns Strict VBox status code.
7526 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7527 * @param pu256Dst Where to return the oword.
7528 * @param iSegReg The index of the segment register to use for
7529 * this access. The base and limits are checked.
7530 * @param GCPtrMem The address of the guest memory.
7531 */
7532VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7533{
7534 /* The lazy approach for now... */
7535 PCRTUINT256U pu256Src;
7536 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7537 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7538 if (rc == VINF_SUCCESS)
7539 {
7540 pu256Dst->au64[0] = pu256Src->au64[0];
7541 pu256Dst->au64[1] = pu256Src->au64[1];
7542 pu256Dst->au64[2] = pu256Src->au64[2];
7543 pu256Dst->au64[3] = pu256Src->au64[3];
7544 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7545 }
7546 return rc;
7547}
7548
7549
7550#ifdef IEM_WITH_SETJMP
7551/**
7552 * Fetches a data oword (octo word) at an aligned address, generally AVX
7553 * related, longjmp on error.
7554 *
7555 * Raises \#GP(0) if not aligned.
7556 *
7557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7558 * @param pu256Dst Where to return the oword.
7559 * @param iSegReg The index of the segment register to use for
7560 * this access. The base and limits are checked.
7561 * @param GCPtrMem The address of the guest memory.
7562 */
7563void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7564 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7565{
7566 /* The lazy approach for now... */
7567 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7568 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7569 pu256Dst->au64[0] = pu256Src->au64[0];
7570 pu256Dst->au64[1] = pu256Src->au64[1];
7571 pu256Dst->au64[2] = pu256Src->au64[2];
7572 pu256Dst->au64[3] = pu256Src->au64[3];
7573 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7574}
7575#endif
7576
7577
7578
7579/**
7580 * Fetches a descriptor register (lgdt, lidt).
7581 *
7582 * @returns Strict VBox status code.
7583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7584 * @param pcbLimit Where to return the limit.
7585 * @param pGCPtrBase Where to return the base.
7586 * @param iSegReg The index of the segment register to use for
7587 * this access. The base and limits are checked.
7588 * @param GCPtrMem The address of the guest memory.
7589 * @param enmOpSize The effective operand size.
7590 */
7591VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7592 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7593{
7594 /*
7595 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7596 * little special:
7597 * - The two reads are done separately.
7598 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7599 * - We suspect the 386 to actually commit the limit before the base in
7600 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7601 * don't try to emulate this eccentric behavior, because it's not well
7602 * enough understood and rather hard to trigger.
7603 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7604 */
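 /* For reference, the pseudo-descriptor read below is laid out as:
  *    offset 0: 16-bit limit
  *    offset 2: base - 32-bit here (only the low 24 bits are used with a
  *              16-bit operand size), or 64-bit in 64-bit code. */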
7605 VBOXSTRICTRC rcStrict;
7606 if (IEM_IS_64BIT_CODE(pVCpu))
7607 {
7608 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7609 if (rcStrict == VINF_SUCCESS)
7610 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7611 }
7612 else
7613 {
7614 uint32_t uTmp = 0; /* (silence Visual C++ 'maybe used uninitialized' warning) */
7615 if (enmOpSize == IEMMODE_32BIT)
7616 {
7617 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7618 {
7619 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7620 if (rcStrict == VINF_SUCCESS)
7621 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7622 }
7623 else
7624 {
7625 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7626 if (rcStrict == VINF_SUCCESS)
7627 {
7628 *pcbLimit = (uint16_t)uTmp;
7629 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7630 }
7631 }
7632 if (rcStrict == VINF_SUCCESS)
7633 *pGCPtrBase = uTmp;
7634 }
7635 else
7636 {
7637 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7638 if (rcStrict == VINF_SUCCESS)
7639 {
7640 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7641 if (rcStrict == VINF_SUCCESS)
7642 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7643 }
7644 }
7645 }
7646 return rcStrict;
7647}
7648
7649
7650
7651/**
7652 * Stores a data byte.
7653 *
7654 * @returns Strict VBox status code.
7655 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7656 * @param iSegReg The index of the segment register to use for
7657 * this access. The base and limits are checked.
7658 * @param GCPtrMem The address of the guest memory.
7659 * @param u8Value The value to store.
7660 */
7661VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7662{
7663 /* The lazy approach for now... */
7664 uint8_t *pu8Dst;
7665 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7666 if (rc == VINF_SUCCESS)
7667 {
7668 *pu8Dst = u8Value;
7669 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7670 }
7671 return rc;
7672}
7673
7674
7675#ifdef IEM_WITH_SETJMP
7676/**
7677 * Stores a data byte, longjmp on error.
7678 *
7679 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7680 * @param iSegReg The index of the segment register to use for
7681 * this access. The base and limits are checked.
7682 * @param GCPtrMem The address of the guest memory.
7683 * @param u8Value The value to store.
7684 */
7685void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP
7686{
7687 /* The lazy approach for now... */
7688 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7689 *pu8Dst = u8Value;
7690 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7691}
7692#endif
7693
7694
7695/**
7696 * Stores a data word.
7697 *
7698 * @returns Strict VBox status code.
7699 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7700 * @param iSegReg The index of the segment register to use for
7701 * this access. The base and limits are checked.
7702 * @param GCPtrMem The address of the guest memory.
7703 * @param u16Value The value to store.
7704 */
7705VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7706{
7707 /* The lazy approach for now... */
7708 uint16_t *pu16Dst;
7709 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7710 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7711 if (rc == VINF_SUCCESS)
7712 {
7713 *pu16Dst = u16Value;
7714 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7715 }
7716 return rc;
7717}
7718
7719
7720#ifdef IEM_WITH_SETJMP
7721/**
7722 * Stores a data word, longjmp on error.
7723 *
7724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7725 * @param iSegReg The index of the segment register to use for
7726 * this access. The base and limits are checked.
7727 * @param GCPtrMem The address of the guest memory.
7728 * @param u16Value The value to store.
7729 */
7730void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP
7731{
7732 /* The lazy approach for now... */
7733 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7734 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7735 *pu16Dst = u16Value;
7736 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7737}
7738#endif
7739
7740
7741/**
7742 * Stores a data dword.
7743 *
7744 * @returns Strict VBox status code.
7745 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7746 * @param iSegReg The index of the segment register to use for
7747 * this access. The base and limits are checked.
7748 * @param GCPtrMem The address of the guest memory.
7749 * @param u32Value The value to store.
7750 */
7751VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7752{
7753 /* The lazy approach for now... */
7754 uint32_t *pu32Dst;
7755 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7756 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7757 if (rc == VINF_SUCCESS)
7758 {
7759 *pu32Dst = u32Value;
7760 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7761 }
7762 return rc;
7763}
7764
7765
7766#ifdef IEM_WITH_SETJMP
7767/**
7768 * Stores a data dword, longjmp on error.
7769 *
7771 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7772 * @param iSegReg The index of the segment register to use for
7773 * this access. The base and limits are checked.
7774 * @param GCPtrMem The address of the guest memory.
7775 * @param u32Value The value to store.
7776 */
7777void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP
7778{
7779 /* The lazy approach for now... */
7780 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7781 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7782 *pu32Dst = u32Value;
7783 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7784}
7785#endif
7786
7787
7788/**
7789 * Stores a data qword.
7790 *
7791 * @returns Strict VBox status code.
7792 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7793 * @param iSegReg The index of the segment register to use for
7794 * this access. The base and limits are checked.
7795 * @param GCPtrMem The address of the guest memory.
7796 * @param u64Value The value to store.
7797 */
7798VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7799{
7800 /* The lazy approach for now... */
7801 uint64_t *pu64Dst;
7802 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7803 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7804 if (rc == VINF_SUCCESS)
7805 {
7806 *pu64Dst = u64Value;
7807 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7808 }
7809 return rc;
7810}
7811
7812
7813#ifdef IEM_WITH_SETJMP
7814/**
7815 * Stores a data qword, longjmp on error.
7816 *
7817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7818 * @param iSegReg The index of the segment register to use for
7819 * this access. The base and limits are checked.
7820 * @param GCPtrMem The address of the guest memory.
7821 * @param u64Value The value to store.
7822 */
7823void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP
7824{
7825 /* The lazy approach for now... */
7826 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7827 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7828 *pu64Dst = u64Value;
7829 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7830}
7831#endif
7832
7833
7834/**
7835 * Stores a data dqword.
7836 *
7837 * @returns Strict VBox status code.
7838 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7839 * @param iSegReg The index of the segment register to use for
7840 * this access. The base and limits are checked.
7841 * @param GCPtrMem The address of the guest memory.
7842 * @param u128Value The value to store.
7843 */
7844VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7845{
7846 /* The lazy approach for now... */
7847 PRTUINT128U pu128Dst;
7848 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7849 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7850 if (rc == VINF_SUCCESS)
7851 {
7852 pu128Dst->au64[0] = u128Value.au64[0];
7853 pu128Dst->au64[1] = u128Value.au64[1];
7854 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7855 }
7856 return rc;
7857}
7858
7859
7860#ifdef IEM_WITH_SETJMP
7861/**
7862 * Stores a data dqword, longjmp on error.
7863 *
7864 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7865 * @param iSegReg The index of the segment register to use for
7866 * this access. The base and limits are checked.
7867 * @param GCPtrMem The address of the guest memory.
7868 * @param u128Value The value to store.
7869 */
7870void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7871{
7872 /* The lazy approach for now... */
7873 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7874 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7875 pu128Dst->au64[0] = u128Value.au64[0];
7876 pu128Dst->au64[1] = u128Value.au64[1];
7877 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7878}
7879#endif
7880
7881
7882/**
7883 * Stores a data dqword, SSE aligned.
7884 *
7885 * @returns Strict VBox status code.
7886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7887 * @param iSegReg The index of the segment register to use for
7888 * this access. The base and limits are checked.
7889 * @param GCPtrMem The address of the guest memory.
7890 * @param u128Value The value to store.
7891 */
7892VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7893{
7894 /* The lazy approach for now... */
7895 PRTUINT128U pu128Dst;
7896 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7897 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7898 if (rc == VINF_SUCCESS)
7899 {
7900 pu128Dst->au64[0] = u128Value.au64[0];
7901 pu128Dst->au64[1] = u128Value.au64[1];
7902 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7903 }
7904 return rc;
7905}
7906
7907
7908#ifdef IEM_WITH_SETJMP
7909/**
7910 * Stores a data dqword, SSE aligned, longjmp on error.
7911 *
7913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7914 * @param iSegReg The index of the segment register to use for
7915 * this access. The base and limits are checked.
7916 * @param GCPtrMem The address of the guest memory.
7917 * @param u128Value The value to store.
7918 */
7919void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7920 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7921{
7922 /* The lazy approach for now... */
7923 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7924 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7925 pu128Dst->au64[0] = u128Value.au64[0];
7926 pu128Dst->au64[1] = u128Value.au64[1];
7927 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7928}
7929#endif
7930
7931
7932/**
7933 * Stores a data oword (octo word), generally AVX related.
7934 *
7935 * @returns Strict VBox status code.
7936 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7937 * @param iSegReg The index of the segment register to use for
7938 * this access. The base and limits are checked.
7939 * @param GCPtrMem The address of the guest memory.
7940 * @param pu256Value Pointer to the value to store.
7941 */
7942VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7943{
7944 /* The lazy approach for now... */
7945 PRTUINT256U pu256Dst;
7946 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7947 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7948 if (rc == VINF_SUCCESS)
7949 {
7950 pu256Dst->au64[0] = pu256Value->au64[0];
7951 pu256Dst->au64[1] = pu256Value->au64[1];
7952 pu256Dst->au64[2] = pu256Value->au64[2];
7953 pu256Dst->au64[3] = pu256Value->au64[3];
7954 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7955 }
7956 return rc;
7957}
7958
7959
7960#ifdef IEM_WITH_SETJMP
7961/**
7962 * Stores a data oword (octo word), longjmp on error.
7963 *
7964 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7965 * @param iSegReg The index of the segment register to use for
7966 * this access. The base and limits are checked.
7967 * @param GCPtrMem The address of the guest memory.
7968 * @param pu256Value Pointer to the value to store.
7969 */
7970void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7971{
7972 /* The lazy approach for now... */
7973 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7974 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7975 pu256Dst->au64[0] = pu256Value->au64[0];
7976 pu256Dst->au64[1] = pu256Value->au64[1];
7977 pu256Dst->au64[2] = pu256Value->au64[2];
7978 pu256Dst->au64[3] = pu256Value->au64[3];
7979 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7980}
7981#endif
7982
7983
7984/**
7985 * Stores a data oword (octo word), AVX \#GP(0) aligned.
7986 *
7987 * @returns Strict VBox status code.
7988 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7989 * @param iSegReg The index of the segment register to use for
7990 * this access. The base and limits are checked.
7991 * @param GCPtrMem The address of the guest memory.
7992 * @param pu256Value Pointer to the value to store.
7993 */
7994VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7995{
7996 /* The lazy approach for now... */
7997 PRTUINT256U pu256Dst;
7998 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7999 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
8000 if (rc == VINF_SUCCESS)
8001 {
8002 pu256Dst->au64[0] = pu256Value->au64[0];
8003 pu256Dst->au64[1] = pu256Value->au64[1];
8004 pu256Dst->au64[2] = pu256Value->au64[2];
8005 pu256Dst->au64[3] = pu256Value->au64[3];
8006 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
8007 }
8008 return rc;
8009}
8010
8011
8012#ifdef IEM_WITH_SETJMP
8013/**
8014 * Stores a data oword (octo word), AVX aligned, longjmp on error.
8015 *
8017 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8018 * @param iSegReg The index of the segment register to use for
8019 * this access. The base and limits are checked.
8020 * @param GCPtrMem The address of the guest memory.
8021 * @param pu256Value Pointer to the value to store.
8022 */
8023void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
8024 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
8025{
8026 /* The lazy approach for now... */
8027 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
8028 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
8029 pu256Dst->au64[0] = pu256Value->au64[0];
8030 pu256Dst->au64[1] = pu256Value->au64[1];
8031 pu256Dst->au64[2] = pu256Value->au64[2];
8032 pu256Dst->au64[3] = pu256Value->au64[3];
8033 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
8034}
8035#endif
8036
8037
8038/**
8039 * Stores a descriptor register (sgdt, sidt).
8040 *
8041 * @returns Strict VBox status code.
8042 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8043 * @param cbLimit The limit.
8044 * @param GCPtrBase The base address.
8045 * @param iSegReg The index of the segment register to use for
8046 * this access. The base and limits are checked.
8047 * @param GCPtrMem The address of the guest memory.
8048 */
8049VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8050{
8051 /*
8052 * The SIDT and SGDT instructions actually store the data using two
8053 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
8054 * do not respond to opsize prefixes.
8055 */
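 /* For reference, the layout written below:
  *    offset 0: 16-bit limit
  *    offset 2: base - dword in 16-bit and 32-bit code (with the top byte
  *              forced to 0xff for <= 286 targets in 16-bit code), or a
  *              full qword in 64-bit code. */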
8056 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
8057 if (rcStrict == VINF_SUCCESS)
8058 {
8059 if (IEM_IS_16BIT_CODE(pVCpu))
8060 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
8061 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
8062 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
8063 else if (IEM_IS_32BIT_CODE(pVCpu))
8064 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
8065 else
8066 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
8067 }
8068 return rcStrict;
8069}
8070
8071
8072/**
8073 * Pushes a word onto the stack.
8074 *
8075 * @returns Strict VBox status code.
8076 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8077 * @param u16Value The value to push.
8078 */
8079VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
8080{
8081 /* Decrement the stack pointer. */
8082 uint64_t uNewRsp;
8083 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
8084
8085 /* Write the word the lazy way. */
8086 uint16_t *pu16Dst;
8087 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8088 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8089 if (rc == VINF_SUCCESS)
8090 {
8091 *pu16Dst = u16Value;
8092 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8093 }
8094
8095 /* Commit the new RSP value unless an access handler made trouble. */
8096 if (rc == VINF_SUCCESS)
8097 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8098
8099 return rc;
8100}
8101
8102
8103/**
8104 * Pushes a dword onto the stack.
8105 *
8106 * @returns Strict VBox status code.
8107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8108 * @param u32Value The value to push.
8109 */
8110VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8111{
8112 /* Decrement the stack pointer. */
8113 uint64_t uNewRsp;
8114 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8115
8116 /* Write the dword the lazy way. */
8117 uint32_t *pu32Dst;
8118 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8119 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8120 if (rc == VINF_SUCCESS)
8121 {
8122 *pu32Dst = u32Value;
8123 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8124 }
8125
8126 /* Commit the new RSP value unless an access handler made trouble. */
8127 if (rc == VINF_SUCCESS)
8128 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8129
8130 return rc;
8131}
8132
8133
8134/**
8135 * Pushes a dword segment register value onto the stack.
8136 *
8137 * @returns Strict VBox status code.
8138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8139 * @param u32Value The value to push.
8140 */
8141VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8142{
8143 /* Decrement the stack pointer. */
8144 uint64_t uNewRsp;
8145 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8146
8147 /* The Intel docs talk about zero extending the selector register
8148 value. My actual Intel CPU here might be zero extending the value,
8149 but it still only writes the lower word... */
8150 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
8151 * happens when crossing an electric page boundary, is the high word checked
8152 * for write accessibility or not? Probably it is. What about segment limits?
8153 * It appears this behavior is also shared with trap error codes.
8154 *
8155 * Docs indicate the behavior may have changed with the Pentium or the Pentium Pro. Check
8156 * on ancient hardware to see when it actually changed. */
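 /* So, the code below reserves a full 32-bit stack slot but maps it read-write
    and only updates the low word, leaving the upper word of the slot as-is. */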
8157 uint16_t *pu16Dst;
8158 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
8159 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
8160 if (rc == VINF_SUCCESS)
8161 {
8162 *pu16Dst = (uint16_t)u32Value;
8163 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
8164 }
8165
8166 /* Commit the new RSP value unless an access handler made trouble. */
8167 if (rc == VINF_SUCCESS)
8168 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8169
8170 return rc;
8171}
8172
8173
8174/**
8175 * Pushes a qword onto the stack.
8176 *
8177 * @returns Strict VBox status code.
8178 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8179 * @param u64Value The value to push.
8180 */
8181VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
8182{
8183 /* Decrement the stack pointer. */
8184 uint64_t uNewRsp;
8185 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
8186
8187 /* Write the qword the lazy way. */
8188 uint64_t *pu64Dst;
8189 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8190 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8191 if (rc == VINF_SUCCESS)
8192 {
8193 *pu64Dst = u64Value;
8194 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8195 }
8196
8197 /* Commit the new RSP value unless an access handler made trouble. */
8198 if (rc == VINF_SUCCESS)
8199 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8200
8201 return rc;
8202}
8203
8204
8205/**
8206 * Pops a word from the stack.
8207 *
8208 * @returns Strict VBox status code.
8209 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8210 * @param pu16Value Where to store the popped value.
8211 */
8212VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
8213{
8214 /* Increment the stack pointer. */
8215 uint64_t uNewRsp;
8216 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
8217
8218 /* Fetch the word the lazy way. */
8219 uint16_t const *pu16Src;
8220 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8221 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8222 if (rc == VINF_SUCCESS)
8223 {
8224 *pu16Value = *pu16Src;
8225 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8226
8227 /* Commit the new RSP value. */
8228 if (rc == VINF_SUCCESS)
8229 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8230 }
8231
8232 return rc;
8233}
8234
8235
8236/**
8237 * Pops a dword from the stack.
8238 *
8239 * @returns Strict VBox status code.
8240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8241 * @param pu32Value Where to store the popped value.
8242 */
8243VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
8244{
8245 /* Increment the stack pointer. */
8246 uint64_t uNewRsp;
8247 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
8248
8249 /* Fetch the dword the lazy way. */
8250 uint32_t const *pu32Src;
8251 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8252 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8253 if (rc == VINF_SUCCESS)
8254 {
8255 *pu32Value = *pu32Src;
8256 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8257
8258 /* Commit the new RSP value. */
8259 if (rc == VINF_SUCCESS)
8260 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8261 }
8262
8263 return rc;
8264}
8265
8266
8267/**
8268 * Pops a qword from the stack.
8269 *
8270 * @returns Strict VBox status code.
8271 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8272 * @param pu64Value Where to store the popped value.
8273 */
8274VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
8275{
8276 /* Increment the stack pointer. */
8277 uint64_t uNewRsp;
8278 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
8279
8280 /* Fetch the qword the lazy way. */
8281 uint64_t const *pu64Src;
8282 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8283 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8284 if (rc == VINF_SUCCESS)
8285 {
8286 *pu64Value = *pu64Src;
8287 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8288
8289 /* Commit the new RSP value. */
8290 if (rc == VINF_SUCCESS)
8291 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8292 }
8293
8294 return rc;
8295}
8296
8297
8298/**
8299 * Pushes a word onto the stack, using a temporary stack pointer.
8300 *
8301 * @returns Strict VBox status code.
8302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8303 * @param u16Value The value to push.
8304 * @param pTmpRsp Pointer to the temporary stack pointer.
8305 */
8306VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8307{
8308 /* Decrement the stack pointer. */
8309 RTUINT64U NewRsp = *pTmpRsp;
8310 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
8311
8312 /* Write the word the lazy way. */
8313 uint16_t *pu16Dst;
8314 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8315 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8316 if (rc == VINF_SUCCESS)
8317 {
8318 *pu16Dst = u16Value;
8319 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8320 }
8321
8322 /* Commit the new RSP value unless an access handler made trouble. */
8323 if (rc == VINF_SUCCESS)
8324 *pTmpRsp = NewRsp;
8325
8326 return rc;
8327}
8328
8329
8330/**
8331 * Pushes a dword onto the stack, using a temporary stack pointer.
8332 *
8333 * @returns Strict VBox status code.
8334 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8335 * @param u32Value The value to push.
8336 * @param pTmpRsp Pointer to the temporary stack pointer.
8337 */
8338VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8339{
8340 /* Decrement the stack pointer. */
8341 RTUINT64U NewRsp = *pTmpRsp;
8342 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
8343
8344 /* Write the dword the lazy way. */
8345 uint32_t *pu32Dst;
8346 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8347 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8348 if (rc == VINF_SUCCESS)
8349 {
8350 *pu32Dst = u32Value;
8351 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8352 }
8353
8354 /* Commit the new RSP value unless an access handler made trouble. */
8355 if (rc == VINF_SUCCESS)
8356 *pTmpRsp = NewRsp;
8357
8358 return rc;
8359}
8360
8361
8362/**
8363 * Pushes a qword onto the stack, using a temporary stack pointer.
8364 *
8365 * @returns Strict VBox status code.
8366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8367 * @param u64Value The value to push.
8368 * @param pTmpRsp Pointer to the temporary stack pointer.
8369 */
8370VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8371{
8372 /* Decrement the stack pointer. */
8373 RTUINT64U NewRsp = *pTmpRsp;
8374 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
8375
8376 /* Write the qword the lazy way. */
8377 uint64_t *pu64Dst;
8378 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8379 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8380 if (rc == VINF_SUCCESS)
8381 {
8382 *pu64Dst = u64Value;
8383 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8384 }
8385
8386 /* Commit the new RSP value unless an access handler made trouble. */
8387 if (rc == VINF_SUCCESS)
8388 *pTmpRsp = NewRsp;
8389
8390 return rc;
8391}
8392
8393
8394/**
8395 * Pops a word from the stack, using a temporary stack pointer.
8396 *
8397 * @returns Strict VBox status code.
8398 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8399 * @param pu16Value Where to store the popped value.
8400 * @param pTmpRsp Pointer to the temporary stack pointer.
8401 */
8402VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8403{
8404 /* Increment the stack pointer. */
8405 RTUINT64U NewRsp = *pTmpRsp;
8406 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
8407
8408 /* Fetch the word the lazy way. */
8409 uint16_t const *pu16Src;
8410 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8411 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8412 if (rc == VINF_SUCCESS)
8413 {
8414 *pu16Value = *pu16Src;
8415 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8416
8417 /* Commit the new RSP value. */
8418 if (rc == VINF_SUCCESS)
8419 *pTmpRsp = NewRsp;
8420 }
8421
8422 return rc;
8423}
8424
8425
8426/**
8427 * Pops a dword from the stack, using a temporary stack pointer.
8428 *
8429 * @returns Strict VBox status code.
8430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8431 * @param pu32Value Where to store the popped value.
8432 * @param pTmpRsp Pointer to the temporary stack pointer.
8433 */
8434VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8435{
8436 /* Increment the stack pointer. */
8437 RTUINT64U NewRsp = *pTmpRsp;
8438 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
8439
8440 /* Fetch the dword the lazy way. */
8441 uint32_t const *pu32Src;
8442 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8443 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8444 if (rc == VINF_SUCCESS)
8445 {
8446 *pu32Value = *pu32Src;
8447 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8448
8449 /* Commit the new RSP value. */
8450 if (rc == VINF_SUCCESS)
8451 *pTmpRsp = NewRsp;
8452 }
8453
8454 return rc;
8455}
8456
8457
8458/**
8459 * Pops a qword from the stack, using a temporary stack pointer.
8460 *
8461 * @returns Strict VBox status code.
8462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8463 * @param pu64Value Where to store the popped value.
8464 * @param pTmpRsp Pointer to the temporary stack pointer.
8465 */
8466VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8467{
8468 /* Increment the stack pointer. */
8469 RTUINT64U NewRsp = *pTmpRsp;
8470 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8471
8472 /* Fetch the qword the lazy way. */
8473 uint64_t const *pu64Src;
8474 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8475 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8476 if (rcStrict == VINF_SUCCESS)
8477 {
8478 *pu64Value = *pu64Src;
8479 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8480
8481 /* Commit the new RSP value. */
8482 if (rcStrict == VINF_SUCCESS)
8483 *pTmpRsp = NewRsp;
8484 }
8485
8486 return rcStrict;
8487}
8488
8489
8490/**
8491 * Begin a special stack push (used by interrupts, exceptions and such).
8492 *
8493 * This will raise \#SS or \#PF if appropriate.
8494 *
8495 * @returns Strict VBox status code.
8496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8497 * @param cbMem The number of bytes to push onto the stack.
8498 * @param cbAlign The alignment mask (7, 3, 1).
8499 * @param ppvMem Where to return the pointer to the stack memory.
8500 * As with the other memory functions this could be
8501 * direct access or bounce buffered access, so
8502 * don't commit the register until the commit call
8503 * succeeds.
8504 * @param puNewRsp Where to return the new RSP value. This must be
8505 * passed unchanged to
8506 * iemMemStackPushCommitSpecial().
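 *
 * @remarks Illustrative usage sketch only (error handling trimmed; uValue is
 *          a hypothetical caller-provided qword):
 * @code
 *      void *pvMem;
 *      uint64_t uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, 7, &pvMem, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *(uint64_t *)pvMem = uValue;
 *          rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvMem, uNewRsp);
 *      }
 * @endcode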
8507 */
8508VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8509 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8510{
8511 Assert(cbMem < UINT8_MAX);
8512 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8513 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
8514 IEM_ACCESS_STACK_W, cbAlign);
8515}
8516
8517
8518/**
8519 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8520 *
8521 * This will update the rSP.
8522 *
8523 * @returns Strict VBox status code.
8524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8525 * @param pvMem The pointer returned by
8526 * iemMemStackPushBeginSpecial().
8527 * @param uNewRsp The new RSP value returned by
8528 * iemMemStackPushBeginSpecial().
8529 */
8530VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8531{
8532 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8533 if (rcStrict == VINF_SUCCESS)
8534 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8535 return rcStrict;
8536}
8537
8538
8539/**
8540 * Begin a special stack pop (used by iret, retf and such).
8541 *
8542 * This will raise \#SS or \#PF if appropriate.
8543 *
8544 * @returns Strict VBox status code.
8545 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8546 * @param cbMem The number of bytes to pop from the stack.
8547 * @param cbAlign The alignment mask (7, 3, 1).
8548 * @param ppvMem Where to return the pointer to the stack memory.
8549 * @param puNewRsp Where to return the new RSP value. This must be
8550 * assigned to CPUMCTX::rsp manually some time
8551 * after iemMemStackPopDoneSpecial() has been
8552 * called.
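 *
 * @remarks Illustrative usage sketch only (error handling trimmed); note how
 *          RSP is committed manually after iemMemStackPopDoneSpecial():
 * @code
 *      void const *pvMem;
 *      uint64_t uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, 7, &pvMem, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint64_t const uValue = *(uint64_t const *)pvMem; // consume the popped value
 *          rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvMem);
 *          if (rcStrict == VINF_SUCCESS)
 *              pVCpu->cpum.GstCtx.rsp = uNewRsp;
 *      }
 * @endcode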
8553 */
8554VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8555 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8556{
8557 Assert(cbMem < UINT8_MAX);
8558 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8559 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8560}
8561
8562
8563/**
8564 * Continue a special stack pop (used by iret and retf), for the purpose of
8565 * retrieving a new stack pointer.
8566 *
8567 * This will raise \#SS or \#PF if appropriate.
8568 *
8569 * @returns Strict VBox status code.
8570 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8571 * @param off Offset from the top of the stack. This is zero
8572 * except in the retf case.
8573 * @param cbMem The number of bytes to pop from the stack.
8574 * @param ppvMem Where to return the pointer to the stack memory.
8575 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8576 * return this because all use of this function is
8577 * to retrieve a new value and anything we return
8578 * here would be discarded.)
8579 */
8580VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8581 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
8582{
8583 Assert(cbMem < UINT8_MAX);
8584
8585 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8586 RTGCPTR GCPtrTop;
8587 if (IEM_IS_64BIT_CODE(pVCpu))
8588 GCPtrTop = uCurNewRsp;
8589 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8590 GCPtrTop = (uint32_t)uCurNewRsp;
8591 else
8592 GCPtrTop = (uint16_t)uCurNewRsp;
8593
8594 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8595 0 /* checked in iemMemStackPopBeginSpecial */);
8596}
8597
8598
8599/**
8600 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8601 * iemMemStackPopContinueSpecial).
8602 *
8603 * The caller will manually commit the rSP.
8604 *
8605 * @returns Strict VBox status code.
8606 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8607 * @param pvMem The pointer returned by
8608 * iemMemStackPopBeginSpecial() or
8609 * iemMemStackPopContinueSpecial().
8610 */
8611VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8612{
8613 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8614}
8615
8616
8617/**
8618 * Fetches a system table byte.
8619 *
8620 * @returns Strict VBox status code.
8621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8622 * @param pbDst Where to return the byte.
8623 * @param iSegReg The index of the segment register to use for
8624 * this access. The base and limits are checked.
8625 * @param GCPtrMem The address of the guest memory.
8626 */
8627VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8628{
8629 /* The lazy approach for now... */
8630 uint8_t const *pbSrc;
8631 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8632 if (rc == VINF_SUCCESS)
8633 {
8634 *pbDst = *pbSrc;
8635 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8636 }
8637 return rc;
8638}
8639
8640
8641/**
8642 * Fetches a system table word.
8643 *
8644 * @returns Strict VBox status code.
8645 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8646 * @param pu16Dst Where to return the word.
8647 * @param iSegReg The index of the segment register to use for
8648 * this access. The base and limits are checked.
8649 * @param GCPtrMem The address of the guest memory.
8650 */
8651VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8652{
8653 /* The lazy approach for now... */
8654 uint16_t const *pu16Src;
8655 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8656 if (rc == VINF_SUCCESS)
8657 {
8658 *pu16Dst = *pu16Src;
8659 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8660 }
8661 return rc;
8662}
8663
8664
8665/**
8666 * Fetches a system table dword.
8667 *
8668 * @returns Strict VBox status code.
8669 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8670 * @param pu32Dst Where to return the dword.
8671 * @param iSegReg The index of the segment register to use for
8672 * this access. The base and limits are checked.
8673 * @param GCPtrMem The address of the guest memory.
8674 */
8675VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8676{
8677 /* The lazy approach for now... */
8678 uint32_t const *pu32Src;
8679 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8680 if (rc == VINF_SUCCESS)
8681 {
8682 *pu32Dst = *pu32Src;
8683 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8684 }
8685 return rc;
8686}
8687
8688
8689/**
8690 * Fetches a system table qword.
8691 *
8692 * @returns Strict VBox status code.
8693 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8694 * @param pu64Dst Where to return the qword.
8695 * @param iSegReg The index of the segment register to use for
8696 * this access. The base and limits are checked.
8697 * @param GCPtrMem The address of the guest memory.
8698 */
8699VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8700{
8701 /* The lazy approach for now... */
8702 uint64_t const *pu64Src;
8703 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8704 if (rc == VINF_SUCCESS)
8705 {
8706 *pu64Dst = *pu64Src;
8707 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8708 }
8709 return rc;
8710}
8711
8712
8713/**
8714 * Fetches a descriptor table entry with caller specified error code.
8715 *
8716 * @returns Strict VBox status code.
8717 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8718 * @param pDesc Where to return the descriptor table entry.
8719 * @param uSel The selector which table entry to fetch.
8720 * @param uXcpt The exception to raise on table lookup error.
8721 * @param uErrorCode The error code associated with the exception.
8722 */
8723static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8724 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8725{
8726 AssertPtr(pDesc);
8727 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8728
8729 /** @todo did the 286 require all 8 bytes to be accessible? */
8730 /*
8731 * Get the selector table base and check bounds.
8732 */
8733 RTGCPTR GCPtrBase;
8734 if (uSel & X86_SEL_LDT)
8735 {
8736 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8737 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8738 {
8739 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8740 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8741 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8742 uErrorCode, 0);
8743 }
8744
8745 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8746 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8747 }
8748 else
8749 {
8750 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8751 {
8752 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8753 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8754 uErrorCode, 0);
8755 }
8756 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8757 }
8758
8759 /*
8760 * Read the legacy descriptor and maybe the long mode extensions if
8761 * required.
8762 */
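 /* Note: for the long mode high half read below, (uSel | X86_SEL_RPL_LDT) + 1
    equals (uSel & X86_SEL_MASK) + 8 since X86_SEL_RPL_LDT is 7, i.e. it is the
    upper 8 bytes of the 16-byte system descriptor. */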
8763 VBOXSTRICTRC rcStrict;
8764 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8765 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8766 else
8767 {
8768 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8769 if (rcStrict == VINF_SUCCESS)
8770 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8771 if (rcStrict == VINF_SUCCESS)
8772 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8773 if (rcStrict == VINF_SUCCESS)
8774 pDesc->Legacy.au16[3] = 0;
8775 else
8776 return rcStrict;
8777 }
8778
8779 if (rcStrict == VINF_SUCCESS)
8780 {
8781 if ( !IEM_IS_LONG_MODE(pVCpu)
8782 || pDesc->Legacy.Gen.u1DescType)
8783 pDesc->Long.au64[1] = 0;
8784 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8785 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8786 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8787 else
8788 {
8789 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8790 /** @todo is this the right exception? */
8791 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8792 }
8793 }
8794 return rcStrict;
8795}
8796
8797
8798/**
8799 * Fetches a descriptor table entry.
8800 *
8801 * @returns Strict VBox status code.
8802 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8803 * @param pDesc Where to return the descriptor table entry.
8804 * @param uSel The selector which table entry to fetch.
8805 * @param uXcpt The exception to raise on table lookup error.
8806 */
8807VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8808{
8809 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8810}
8811
8812
8813/**
8814 * Marks the selector descriptor as accessed (only non-system descriptors).
8815 *
8816 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8817 * will therefore skip the limit checks.
8818 *
8819 * @returns Strict VBox status code.
8820 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8821 * @param uSel The selector.
8822 */
8823VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8824{
8825 /*
8826 * Get the selector table base and calculate the entry address.
8827 */
8828 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8829 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8830 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8831 GCPtr += uSel & X86_SEL_MASK;
8832
8833 /*
8834 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8835 * ugly stuff to avoid this. This will make sure it's an atomic access
8836 * as well as more or less remove any question about 8-bit vs 32-bit accesses.
8837 */
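 /* Note: the accessed bit is bit 0 of the type byte at descriptor offset 5,
    i.e. bit 40 of the 8-byte entry; with the dword at offset 4 mapped in the
    aligned case below, that becomes bit 8 of the mapped dword. */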
8838 VBOXSTRICTRC rcStrict;
8839 uint32_t volatile *pu32;
8840 if ((GCPtr & 3) == 0)
8841 {
8842 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
8843 GCPtr += 2 + 2;
8844 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8845 if (rcStrict != VINF_SUCCESS)
8846 return rcStrict;
8847 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8848 }
8849 else
8850 {
8851 /* The misaligned GDT/LDT case, map the whole thing. */
8852 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8853 if (rcStrict != VINF_SUCCESS)
8854 return rcStrict;
8855 switch ((uintptr_t)pu32 & 3)
8856 {
8857 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8858 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8859 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8860 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8861 }
8862 }
8863
8864 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8865}
8866
8867/** @} */
8868
8869/** @name Opcode Helpers.
8870 * @{
8871 */
8872
8873/**
8874 * Calculates the effective address of a ModR/M memory operand.
8875 *
8876 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8877 *
8878 * @return Strict VBox status code.
8879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8880 * @param bRm The ModRM byte.
8881 * @param cbImmAndRspOffset - First byte: The size of any immediate
8882 * following the effective address opcode bytes
8883 * (only for RIP relative addressing).
8884 * - Second byte: RSP displacement (for POP [ESP]).
8885 * @param pGCPtrEff Where to return the effective address.
8886 * @param pGCPtrEff Where to return the effective address.
 */
8887VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8888{
8889 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8890# define SET_SS_DEF() \
8891 do \
8892 { \
8893 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8894 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8895 } while (0)
8896
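 /* Worked example (illustration only): bRm=0x46 with 16-bit addressing means
    mod=1, rm=6, i.e. [bp + disp8] with SS as the default segment below. */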
8897 if (!IEM_IS_64BIT_CODE(pVCpu))
8898 {
8899/** @todo Check the effective address size crap! */
8900 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8901 {
8902 uint16_t u16EffAddr;
8903
8904 /* Handle the disp16 form with no registers first. */
8905 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8906 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8907 else
8908 {
8909 /* Get the displacement. */
8910 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8911 {
8912 case 0: u16EffAddr = 0; break;
8913 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8914 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8915 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8916 }
8917
8918 /* Add the base and index registers to the disp. */
8919 switch (bRm & X86_MODRM_RM_MASK)
8920 {
8921 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8922 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8923 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8924 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8925 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8926 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8927 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8928 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8929 }
8930 }
8931
8932 *pGCPtrEff = u16EffAddr;
8933 }
8934 else
8935 {
8936 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8937 uint32_t u32EffAddr;
8938
8939 /* Handle the disp32 form with no registers first. */
8940 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8941 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8942 else
8943 {
8944 /* Get the register (or SIB) value. */
8945 switch ((bRm & X86_MODRM_RM_MASK))
8946 {
8947 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8948 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8949 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8950 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8951 case 4: /* SIB */
8952 {
8953 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8954
8955 /* Get the index and scale it. */
8956 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8957 {
8958 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8959 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8960 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8961 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8962 case 4: u32EffAddr = 0; /*none */ break;
8963 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8964 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8965 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8966 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8967 }
8968 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8969
8970 /* add base */
8971 switch (bSib & X86_SIB_BASE_MASK)
8972 {
8973 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8974 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8975 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8976 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8977 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8978 case 5:
8979 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8980 {
8981 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8982 SET_SS_DEF();
8983 }
8984 else
8985 {
8986 uint32_t u32Disp;
8987 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8988 u32EffAddr += u32Disp;
8989 }
8990 break;
8991 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8992 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8993 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8994 }
8995 break;
8996 }
8997 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8998 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8999 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9000 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9001 }
9002
9003 /* Get and add the displacement. */
9004 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9005 {
9006 case 0:
9007 break;
9008 case 1:
9009 {
9010 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9011 u32EffAddr += i8Disp;
9012 break;
9013 }
9014 case 2:
9015 {
9016 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9017 u32EffAddr += u32Disp;
9018 break;
9019 }
9020 default:
9021 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9022 }
9023
9024 }
9025 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9026 *pGCPtrEff = u32EffAddr;
9027 else
9028 {
9029 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9030 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9031 }
9032 }
9033 }
9034 else
9035 {
9036 uint64_t u64EffAddr;
9037
9038 /* Handle the rip+disp32 form with no registers first. */
9039 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9040 {
9041 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
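            /* RIP-relative addressing is relative to the end of the instruction.
               IEM_GET_INSTR_LEN() only covers the bytes decoded so far, so the
               low byte of cbImmAndRspOffset adds the size of any trailing
               immediate that has not been fetched yet. */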
9042 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9043 }
9044 else
9045 {
9046 /* Get the register (or SIB) value. */
9047 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9048 {
9049 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9050 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9051 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9052 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9053 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9054 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9055 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9056 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9057 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9058 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9059 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9060 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9061 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9062 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9063 /* SIB */
9064 case 4:
9065 case 12:
9066 {
9067 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9068
9069 /* Get the index and scale it. */
9070 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9071 {
9072 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9073 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9074 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9075 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9076 case 4: u64EffAddr = 0; /*none */ break;
9077 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9078 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9079 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9080 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9081 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9082 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9083 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9084 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9085 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9086 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9087 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9088 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9089 }
9090 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9091
9092 /* add base */
9093 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9094 {
9095 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9096 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9097 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9098 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9099 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9100 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9101 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9102 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9103 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9104 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9105 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9106 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9107 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9108 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9109 /* complicated encodings */
9110 case 5:
9111 case 13:
9112 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9113 {
9114 if (!pVCpu->iem.s.uRexB)
9115 {
9116 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9117 SET_SS_DEF();
9118 }
9119 else
9120 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9121 }
9122 else
9123 {
9124 uint32_t u32Disp;
9125 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9126 u64EffAddr += (int32_t)u32Disp;
9127 }
9128 break;
9129 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9130 }
9131 break;
9132 }
9133 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9134 }
9135
9136 /* Get and add the displacement. */
9137 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9138 {
9139 case 0:
9140 break;
9141 case 1:
9142 {
9143 int8_t i8Disp;
9144 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9145 u64EffAddr += i8Disp;
9146 break;
9147 }
9148 case 2:
9149 {
9150 uint32_t u32Disp;
9151 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9152 u64EffAddr += (int32_t)u32Disp;
9153 break;
9154 }
9155 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9156 }
9157
9158 }
9159
9160 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9161 *pGCPtrEff = u64EffAddr;
9162 else
9163 {
9164 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9165 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9166 }
9167 }
9168
9169 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9170 return VINF_SUCCESS;
9171}
9172
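/* Usage sketch (illustrative only; the decoder normally reaches this helper via
   the IEM_MC_CALC_RM_EFF_ADDR microcode statement):
       RTGCPTR      GCPtrEff;
       VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
       if (rcStrict != VINF_SUCCESS)
           return rcStrict;
   after which GCPtrEff can be handed to the memory mapping/fetch helpers. */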
9173
9174#ifdef IEM_WITH_SETJMP
9175/**
9176 * Calculates the effective address of a ModR/M memory operand.
9177 *
9178 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9179 *
9180 * May longjmp on internal error.
9181 *
9182 * @return The effective address.
9183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9184 * @param bRm The ModRM byte.
9185 * @param cbImmAndRspOffset - First byte: The size of any immediate
9186 * following the effective address opcode bytes
9187 * (only for RIP relative addressing).
9188 * - Second byte: RSP displacement (for POP [ESP]).
9189 */
9190RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
9191{
9192 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9193# define SET_SS_DEF() \
9194 do \
9195 { \
9196 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9197 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9198 } while (0)
9199
9200 if (!IEM_IS_64BIT_CODE(pVCpu))
9201 {
9202/** @todo Check the effective address size crap! */
9203 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9204 {
9205 uint16_t u16EffAddr;
9206
9207 /* Handle the disp16 form with no registers first. */
9208 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9209 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9210 else
9211 {
9212 /* Get the displacement. */
9213 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9214 {
9215 case 0: u16EffAddr = 0; break;
9216 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9217 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9218 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
9219 }
9220
9221 /* Add the base and index registers to the disp. */
9222 switch (bRm & X86_MODRM_RM_MASK)
9223 {
9224 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9225 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9226 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9227 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9228 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9229 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9230 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9231 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9232 }
9233 }
9234
9235 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9236 return u16EffAddr;
9237 }
9238
9239 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9240 uint32_t u32EffAddr;
9241
9242 /* Handle the disp32 form with no registers first. */
9243 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9244 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9245 else
9246 {
9247 /* Get the register (or SIB) value. */
9248 switch ((bRm & X86_MODRM_RM_MASK))
9249 {
9250 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9251 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9252 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9253 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9254 case 4: /* SIB */
9255 {
9256 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9257
9258 /* Get the index and scale it. */
9259 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9260 {
9261 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9262 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9263 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9264 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9265 case 4: u32EffAddr = 0; /*none */ break;
9266 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9267 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9268 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9269 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9270 }
9271 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9272
9273 /* add base */
9274 switch (bSib & X86_SIB_BASE_MASK)
9275 {
9276 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9277 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9278 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9279 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9280 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9281 case 5:
9282 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9283 {
9284 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9285 SET_SS_DEF();
9286 }
9287 else
9288 {
9289 uint32_t u32Disp;
9290 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9291 u32EffAddr += u32Disp;
9292 }
9293 break;
9294 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9295 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9296 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9297 }
9298 break;
9299 }
9300 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9301 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9302 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9303 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9304 }
9305
9306 /* Get and add the displacement. */
9307 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9308 {
9309 case 0:
9310 break;
9311 case 1:
9312 {
9313 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9314 u32EffAddr += i8Disp;
9315 break;
9316 }
9317 case 2:
9318 {
9319 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9320 u32EffAddr += u32Disp;
9321 break;
9322 }
9323 default:
9324 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
9325 }
9326 }
9327
9328 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9329 {
9330 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9331 return u32EffAddr;
9332 }
9333 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9334 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9335 return u32EffAddr & UINT16_MAX;
9336 }
9337
9338 uint64_t u64EffAddr;
9339
9340 /* Handle the rip+disp32 form with no registers first. */
9341 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9342 {
9343 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9344 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9345 }
9346 else
9347 {
9348 /* Get the register (or SIB) value. */
9349 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9350 {
9351 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9352 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9353 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9354 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9355 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9356 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9357 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9358 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9359 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9360 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9361 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9362 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9363 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9364 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9365 /* SIB */
9366 case 4:
9367 case 12:
9368 {
9369 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9370
9371 /* Get the index and scale it. */
9372 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9373 {
9374 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9375 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9376 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9377 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9378 case 4: u64EffAddr = 0; /*none */ break;
9379 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9380 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9381 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9382 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9383 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9384 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9385 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9386 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9387 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9388 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9389 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9390 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9391 }
9392 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9393
9394 /* add base */
9395 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9396 {
9397 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9398 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9399 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9400 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9401 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9402 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9403 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9404 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9405 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9406 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9407 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9408 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9409 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9410 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9411 /* complicated encodings */
9412 case 5:
9413 case 13:
9414 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9415 {
9416 if (!pVCpu->iem.s.uRexB)
9417 {
9418 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9419 SET_SS_DEF();
9420 }
9421 else
9422 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9423 }
9424 else
9425 {
9426 uint32_t u32Disp;
9427 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9428 u64EffAddr += (int32_t)u32Disp;
9429 }
9430 break;
9431 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9432 }
9433 break;
9434 }
9435 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9436 }
9437
9438 /* Get and add the displacement. */
9439 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9440 {
9441 case 0:
9442 break;
9443 case 1:
9444 {
9445 int8_t i8Disp;
9446 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9447 u64EffAddr += i8Disp;
9448 break;
9449 }
9450 case 2:
9451 {
9452 uint32_t u32Disp;
9453 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9454 u64EffAddr += (int32_t)u32Disp;
9455 break;
9456 }
9457 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9458 }
9459
9460 }
9461
9462 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9463 {
9464 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9465 return u64EffAddr;
9466 }
9467 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9468 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9469 return u64EffAddr & UINT32_MAX;
9470}
9471#endif /* IEM_WITH_SETJMP */
9472
9473
9474/**
9475 * Calculates the effective address of a ModR/M memory operand, extended version
9476 * for use in the recompilers.
9477 *
9478 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9479 *
9480 * @return Strict VBox status code.
9481 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9482 * @param bRm The ModRM byte.
9483 * @param cbImmAndRspOffset - First byte: The size of any immediate
9484 * following the effective address opcode bytes
9485 * (only for RIP relative addressing).
9486 * - Second byte: RSP displacement (for POP [ESP]).
9487 * @param pGCPtrEff Where to return the effective address.
9488 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
9489 * SIB byte (bits 39:32).
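 *
 * @note  Packing example: in 32-bit addressing, bRm=0x84 (mod=10, rm=100) with
 *        SIB=0x88 and disp32=0x1000 yields the effective address
 *        EAX + ECX*4 + 0x1000 and *puInfo = UINT64_C(0x0000008800001000).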
9490 */
9491VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
9492{
9493 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9494# define SET_SS_DEF() \
9495 do \
9496 { \
9497 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9498 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9499 } while (0)
9500
9501 uint64_t uInfo;
9502 if (!IEM_IS_64BIT_CODE(pVCpu))
9503 {
9504/** @todo Check the effective address size crap! */
9505 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9506 {
9507 uint16_t u16EffAddr;
9508
9509 /* Handle the disp16 form with no registers first. */
9510 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9511 {
9512 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9513 uInfo = u16EffAddr;
9514 }
9515 else
9516 {
9517 /* Get the displacement. */
9518 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9519 {
9520 case 0: u16EffAddr = 0; break;
9521 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9522 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9523 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9524 }
9525 uInfo = u16EffAddr;
9526
9527 /* Add the base and index registers to the disp. */
9528 switch (bRm & X86_MODRM_RM_MASK)
9529 {
9530 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9531 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9532 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9533 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9534 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9535 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9536 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9537 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9538 }
9539 }
9540
9541 *pGCPtrEff = u16EffAddr;
9542 }
9543 else
9544 {
9545 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9546 uint32_t u32EffAddr;
9547
9548 /* Handle the disp32 form with no registers first. */
9549 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9550 {
9551 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9552 uInfo = u32EffAddr;
9553 }
9554 else
9555 {
9556 /* Get the register (or SIB) value. */
9557 uInfo = 0;
9558 switch ((bRm & X86_MODRM_RM_MASK))
9559 {
9560 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9561 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9562 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9563 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9564 case 4: /* SIB */
9565 {
9566 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9567 uInfo = (uint64_t)bSib << 32;
9568
9569 /* Get the index and scale it. */
9570 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9571 {
9572 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9573 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9574 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9575 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9576 case 4: u32EffAddr = 0; /*none */ break;
9577 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9578 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9579 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9580 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9581 }
9582 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9583
9584 /* add base */
9585 switch (bSib & X86_SIB_BASE_MASK)
9586 {
9587 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9588 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9589 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9590 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9591 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9592 case 5:
9593 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9594 {
9595 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9596 SET_SS_DEF();
9597 }
9598 else
9599 {
9600 uint32_t u32Disp;
9601 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9602 u32EffAddr += u32Disp;
9603 uInfo |= u32Disp;
9604 }
9605 break;
9606 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9607 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9608 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9609 }
9610 break;
9611 }
9612 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9613 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9614 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9615 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9616 }
9617
9618 /* Get and add the displacement. */
9619 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9620 {
9621 case 0:
9622 break;
9623 case 1:
9624 {
9625 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9626 u32EffAddr += i8Disp;
9627 uInfo |= (uint32_t)(int32_t)i8Disp;
9628 break;
9629 }
9630 case 2:
9631 {
9632 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9633 u32EffAddr += u32Disp;
9634 uInfo |= (uint32_t)u32Disp;
9635 break;
9636 }
9637 default:
9638 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9639 }
9640
9641 }
9642 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9643 *pGCPtrEff = u32EffAddr;
9644 else
9645 {
9646 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9647 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9648 }
9649 }
9650 }
9651 else
9652 {
9653 uint64_t u64EffAddr;
9654
9655 /* Handle the rip+disp32 form with no registers first. */
9656 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9657 {
9658 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9659 uInfo = (uint32_t)u64EffAddr;
9660 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9661 }
9662 else
9663 {
9664 /* Get the register (or SIB) value. */
9665 uInfo = 0;
9666 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9667 {
9668 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9669 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9670 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9671 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9672 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9673 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9674 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9675 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9676 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9677 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9678 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9679 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9680 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9681 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9682 /* SIB */
9683 case 4:
9684 case 12:
9685 {
9686 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9687 uInfo = (uint64_t)bSib << 32;
9688
9689 /* Get the index and scale it. */
9690 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9691 {
9692 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9693 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9694 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9695 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9696 case 4: u64EffAddr = 0; /*none */ break;
9697 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9698 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9699 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9700 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9701 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9702 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9703 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9704 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9705 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9706 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9707 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9708 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9709 }
9710 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9711
9712 /* add base */
9713 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9714 {
9715 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9716 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9717 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9718 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9719 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9720 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9721 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9722 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9723 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9724 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9725 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9726 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9727 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9728 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9729 /* complicated encodings */
9730 case 5:
9731 case 13:
9732 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9733 {
9734 if (!pVCpu->iem.s.uRexB)
9735 {
9736 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9737 SET_SS_DEF();
9738 }
9739 else
9740 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9741 }
9742 else
9743 {
9744 uint32_t u32Disp;
9745 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9746 u64EffAddr += (int32_t)u32Disp;
9747 uInfo |= u32Disp;
9748 }
9749 break;
9750 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9751 }
9752 break;
9753 }
9754 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9755 }
9756
9757 /* Get and add the displacement. */
9758 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9759 {
9760 case 0:
9761 break;
9762 case 1:
9763 {
9764 int8_t i8Disp;
9765 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9766 u64EffAddr += i8Disp;
9767 uInfo |= (uint32_t)(int32_t)i8Disp;
9768 break;
9769 }
9770 case 2:
9771 {
9772 uint32_t u32Disp;
9773 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9774 u64EffAddr += (int32_t)u32Disp;
9775 uInfo |= u32Disp;
9776 break;
9777 }
9778 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9779 }
9780
9781 }
9782
9783 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9784 *pGCPtrEff = u64EffAddr;
9785 else
9786 {
9787 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9788 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9789 }
9790 }
9791 *puInfo = uInfo;
9792
9793 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9794 return VINF_SUCCESS;
9795}
9796
9797/** @} */
9798
9799
9800#ifdef LOG_ENABLED
9801/**
9802 * Logs the current instruction.
9803 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9804 * @param fSameCtx Set if we have the same context information as the VMM,
9805 * clear if we may have already executed an instruction in
9806 * our debug context. When clear, we assume IEMCPU holds
9807 * valid CPU mode info.
9808 *
9809 * The @a fSameCtx parameter is now misleading and obsolete.
9810 * @param pszFunction The IEM function doing the execution.
9811 */
9812static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9813{
9814# ifdef IN_RING3
9815 if (LogIs2Enabled())
9816 {
9817 char szInstr[256];
9818 uint32_t cbInstr = 0;
9819 if (fSameCtx)
9820 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9821 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9822 szInstr, sizeof(szInstr), &cbInstr);
9823 else
9824 {
9825 uint32_t fFlags = 0;
9826 switch (IEM_GET_CPU_MODE(pVCpu))
9827 {
9828 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9829 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9830 case IEMMODE_16BIT:
9831 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9832 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9833 else
9834 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9835 break;
9836 }
9837 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9838 szInstr, sizeof(szInstr), &cbInstr);
9839 }
9840
9841 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9842 Log2(("**** %s fExec=%x\n"
9843 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9844 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9845 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9846 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9847 " %s\n"
9848 , pszFunction, pVCpu->iem.s.fExec,
9849 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9850 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9851 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9852 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9853 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9854 szInstr));
9855
9856 if (LogIs3Enabled())
9857 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9858 }
9859 else
9860# endif
9861 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9862 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9863 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9864}
9865#endif /* LOG_ENABLED */
9866
9867
9868#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9869/**
9870 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9871 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9872 *
9873 * @returns Modified rcStrict.
9874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9875 * @param rcStrict The instruction execution status.
9876 */
9877static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9878{
9879 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9880 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9881 {
9882 /* VMX preemption timer takes priority over NMI-window exits. */
9883 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9884 {
9885 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9886 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9887 }
9888 /*
9889 * Check remaining intercepts.
9890 *
9891 * NMI-window and Interrupt-window VM-exits.
9892 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9893 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9894 *
9895 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9896 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9897 */
9898 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9899 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9900 && !TRPMHasTrap(pVCpu))
9901 {
9902 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9903 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9904 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9905 {
9906 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9907 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9908 }
9909 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9910 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9911 {
9912 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9913 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9914 }
9915 }
9916 }
9917 /* TPR-below threshold/APIC write has the highest priority. */
9918 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9919 {
9920 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9921 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9922 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9923 }
9924 /* MTF takes priority over VMX-preemption timer. */
9925 else
9926 {
9927 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9928 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9929 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9930 }
9931 return rcStrict;
9932}
9933#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9934
9935
9936/**
9937 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9938 * IEMExecOneWithPrefetchedByPC.
9939 *
9940 * Similar code is found in IEMExecLots.
9941 *
9942 * @return Strict VBox status code.
9943 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9944 * @param fExecuteInhibit If set, execute the instruction following CLI,
9945 * POP SS and MOV SS,GR.
9946 * @param pszFunction The calling function name.
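 *
 * @note  When @a fExecuteInhibit is set and the first instruction completes
 *        successfully while the CPU is still in an interrupt shadow (e.g.
 *        after POP SS or MOV SS), the shadowed instruction is decoded and
 *        executed here as well before returning.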
9947 */
9948DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9949{
9950 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9951 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9952 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9953 RT_NOREF_PV(pszFunction);
9954
9955#ifdef IEM_WITH_SETJMP
9956 VBOXSTRICTRC rcStrict;
9957 IEM_TRY_SETJMP(pVCpu, rcStrict)
9958 {
9959 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9960 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9961 }
9962 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9963 {
9964 pVCpu->iem.s.cLongJumps++;
9965 }
9966 IEM_CATCH_LONGJMP_END(pVCpu);
9967#else
9968 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9969 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9970#endif
9971 if (rcStrict == VINF_SUCCESS)
9972 pVCpu->iem.s.cInstructions++;
9973 if (pVCpu->iem.s.cActiveMappings > 0)
9974 {
9975 Assert(rcStrict != VINF_SUCCESS);
9976 iemMemRollback(pVCpu);
9977 }
9978 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9979 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9980 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9981
9982//#ifdef DEBUG
9983// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9984//#endif
9985
9986#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9987 /*
9988 * Perform any VMX nested-guest instruction boundary actions.
9989 *
9990 * If any of these causes a VM-exit, we must skip executing the next
9991 * instruction (would run into stale page tables). A VM-exit makes sure
9992 * there is no interrupt-inhibition, so that should ensure we don't go on
9993 * to execute the next instruction. Clearing fExecuteInhibit is
9994 * problematic because of the setjmp/longjmp clobbering above.
9995 */
9996 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9997 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9998 || rcStrict != VINF_SUCCESS)
9999 { /* likely */ }
10000 else
10001 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10002#endif
10003
10004 /* Execute the next instruction as well if a cli, pop ss or
10005 mov ss, Gr has just completed successfully. */
10006 if ( fExecuteInhibit
10007 && rcStrict == VINF_SUCCESS
10008 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
10009 {
10010 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
10011 if (rcStrict == VINF_SUCCESS)
10012 {
10013#ifdef LOG_ENABLED
10014 iemLogCurInstr(pVCpu, false, pszFunction);
10015#endif
10016#ifdef IEM_WITH_SETJMP
10017 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
10018 {
10019 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10020 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10021 }
10022 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10023 {
10024 pVCpu->iem.s.cLongJumps++;
10025 }
10026 IEM_CATCH_LONGJMP_END(pVCpu);
10027#else
10028 IEM_OPCODE_GET_FIRST_U8(&b);
10029 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10030#endif
10031 if (rcStrict == VINF_SUCCESS)
10032 {
10033 pVCpu->iem.s.cInstructions++;
10034#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10035 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10036 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
10037 { /* likely */ }
10038 else
10039 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10040#endif
10041 }
10042 if (pVCpu->iem.s.cActiveMappings > 0)
10043 {
10044 Assert(rcStrict != VINF_SUCCESS);
10045 iemMemRollback(pVCpu);
10046 }
10047 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
10048 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
10049 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
10050 }
10051 else if (pVCpu->iem.s.cActiveMappings > 0)
10052 iemMemRollback(pVCpu);
10053 /** @todo drop this after we bake this change into RIP advancing. */
10054 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
10055 }
10056
10057 /*
10058 * Return value fiddling, statistics and sanity assertions.
10059 */
10060 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10061
10062 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10063 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10064 return rcStrict;
10065}
10066
10067
10068/**
10069 * Execute one instruction.
10070 *
10071 * @return Strict VBox status code.
10072 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10073 */
10074VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
10075{
10076 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
10077#ifdef LOG_ENABLED
10078 iemLogCurInstr(pVCpu, true, "IEMExecOne");
10079#endif
10080
10081 /*
10082 * Do the decoding and emulation.
10083 */
10084 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10085 if (rcStrict == VINF_SUCCESS)
10086 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
10087 else if (pVCpu->iem.s.cActiveMappings > 0)
10088 iemMemRollback(pVCpu);
10089
10090 if (rcStrict != VINF_SUCCESS)
10091 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10092 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10093 return rcStrict;
10094}
10095
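/* Illustrative caller pattern (a sketch, not code from this file): an EM-style
   loop would typically do
       VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
   and then dispatch on rcStrict, treating informational statuses as requests
   to leave the instruction-by-instruction loop. */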
10096
10097VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10098{
10099 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10100 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10101 if (rcStrict == VINF_SUCCESS)
10102 {
10103 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
10104 if (pcbWritten)
10105 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10106 }
10107 else if (pVCpu->iem.s.cActiveMappings > 0)
10108 iemMemRollback(pVCpu);
10109
10110 return rcStrict;
10111}
10112
10113
10114VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10115 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10116{
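    /* Fast path: if the caller already holds the opcode bytes for the current
       RIP, seed the decoder's instruction buffer (or opcode array) directly so
       the prefetch needs no guest memory access; otherwise fall back to the
       regular decode-and-prefetch path below. */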
10117 VBOXSTRICTRC rcStrict;
10118 if ( cbOpcodeBytes
10119 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10120 {
10121 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
10122#ifdef IEM_WITH_CODE_TLB
10123 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10124 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10125 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10126 pVCpu->iem.s.offCurInstrStart = 0;
10127 pVCpu->iem.s.offInstrNextByte = 0;
10128 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
10129#else
10130 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10131 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10132#endif
10133 rcStrict = VINF_SUCCESS;
10134 }
10135 else
10136 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10137 if (rcStrict == VINF_SUCCESS)
10138 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
10139 else if (pVCpu->iem.s.cActiveMappings > 0)
10140 iemMemRollback(pVCpu);
10141
10142 return rcStrict;
10143}
10144
10145
10146VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10147{
10148 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10149 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
10150 if (rcStrict == VINF_SUCCESS)
10151 {
10152 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
10153 if (pcbWritten)
10154 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10155 }
10156 else if (pVCpu->iem.s.cActiveMappings > 0)
10157 iemMemRollback(pVCpu);
10158
10159 return rcStrict;
10160}
10161
10162
10163VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10164 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10165{
10166 VBOXSTRICTRC rcStrict;
10167 if ( cbOpcodeBytes
10168 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10169 {
10170 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
10171#ifdef IEM_WITH_CODE_TLB
10172 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10173 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10174 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10175 pVCpu->iem.s.offCurInstrStart = 0;
10176 pVCpu->iem.s.offInstrNextByte = 0;
10177 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
10178#else
10179 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10180 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10181#endif
10182 rcStrict = VINF_SUCCESS;
10183 }
10184 else
10185 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
10186 if (rcStrict == VINF_SUCCESS)
10187 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
10188 else if (pVCpu->iem.s.cActiveMappings > 0)
10189 iemMemRollback(pVCpu);
10190
10191 return rcStrict;
10192}
10193
10194
10195/**
10196 * For handling split cacheline lock operations when the host has split-lock
10197 * detection enabled.
10198 *
10199 * This will cause the interpreter to disregard the lock prefix and implicit
10200 * locking (xchg).
10201 *
10202 * @returns Strict VBox status code.
10203 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10204 */
10205VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
10206{
10207 /*
10208 * Do the decoding and emulation.
10209 */
10210 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
10211 if (rcStrict == VINF_SUCCESS)
10212 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
10213 else if (pVCpu->iem.s.cActiveMappings > 0)
10214 iemMemRollback(pVCpu);
10215
10216 if (rcStrict != VINF_SUCCESS)
10217 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10218 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10219 return rcStrict;
10220}
10221
10222
10223/**
10224 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
10225 * inject a pending TRPM trap.
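 *
 * The event is only delivered when the guest is not in an interrupt shadow,
 * interrupts are not inhibited by an NMI, and interrupts are enabled for the
 * current (possibly nested) guest state; otherwise it is left pending in TRPM.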
10226 */
10227VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
10228{
10229 Assert(TRPMHasTrap(pVCpu));
10230
10231 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
10232 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
10233 {
10234 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
10235#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10236 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
10237 if (fIntrEnabled)
10238 {
10239 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
10240 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10241 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
10242 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
10243 else
10244 {
10245 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
10246 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
10247 }
10248 }
10249#else
10250 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10251#endif
10252 if (fIntrEnabled)
10253 {
10254 uint8_t u8TrapNo;
10255 TRPMEVENT enmType;
10256 uint32_t uErrCode;
10257 RTGCPTR uCr2;
10258 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
10259 AssertRC(rc2);
10260 Assert(enmType == TRPM_HARDWARE_INT);
10261 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
10262
10263 TRPMResetTrap(pVCpu);
10264
10265#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10266 /* Injecting an event may cause a VM-exit. */
10267 if ( rcStrict != VINF_SUCCESS
10268 && rcStrict != VINF_IEM_RAISED_XCPT)
10269 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
10270#else
10271 NOREF(rcStrict);
10272#endif
10273 }
10274 }
10275
10276 return VINF_SUCCESS;
10277}
10278
10279
10280VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
10281{
10282 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
10283 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
10284 Assert(cMaxInstructions > 0);
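    /* Note: cPollRate is used as a bit mask by the timer polling check in the
       run loop below, hence the power-of-two-minus-one requirement above. */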
10285
10286 /*
10287 * See if there is an interrupt pending in TRPM, inject it if we can.
10288 */
10289 /** @todo What if we are injecting an exception and not an interrupt? Is that
10290 * possible here? For now we assert it is indeed only an interrupt. */
10291 if (!TRPMHasTrap(pVCpu))
10292 { /* likely */ }
10293 else
10294 {
10295 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
10296 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10297 { /*likely */ }
10298 else
10299 return rcStrict;
10300 }
10301
10302 /*
10303 * Initial decoder init w/ prefetch, then setup setjmp.
10304 */
10305 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10306 if (rcStrict == VINF_SUCCESS)
10307 {
10308#ifdef IEM_WITH_SETJMP
10309 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
10310 IEM_TRY_SETJMP(pVCpu, rcStrict)
10311#endif
10312 {
10313 /*
10314 * The run loop. We limit ourselves to the caller-specified maximum instruction count.
10315 */
10316 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
10317 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10318 for (;;)
10319 {
10320 /*
10321 * Log the state.
10322 */
10323#ifdef LOG_ENABLED
10324 iemLogCurInstr(pVCpu, true, "IEMExecLots");
10325#endif
10326
10327 /*
10328 * Do the decoding and emulation.
10329 */
10330 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10331 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10332#ifdef VBOX_STRICT
10333 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
10334#endif
10335 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10336 {
10337 Assert(pVCpu->iem.s.cActiveMappings == 0);
10338 pVCpu->iem.s.cInstructions++;
10339
10340#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10341 /* Perform any VMX nested-guest instruction boundary actions. */
10342 uint64_t fCpu = pVCpu->fLocalForcedActions;
10343 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10344 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10345 { /* likely */ }
10346 else
10347 {
10348 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10349 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10350 fCpu = pVCpu->fLocalForcedActions;
10351 else
10352 {
10353 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10354 break;
10355 }
10356 }
10357#endif
10358 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10359 {
10360#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10361 uint64_t fCpu = pVCpu->fLocalForcedActions;
10362#endif
10363 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10364 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10365 | VMCPU_FF_TLB_FLUSH
10366 | VMCPU_FF_UNHALT );
10367
10368 if (RT_LIKELY( ( !fCpu
10369 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10370 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
10371 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
10372 {
10373 if (--cMaxInstructionsGccStupidity > 0)
10374 {
10375 /* Poll timers every now and then according to the caller's specs. */
10376 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
10377 || !TMTimerPollBool(pVM, pVCpu))
10378 {
10379 Assert(pVCpu->iem.s.cActiveMappings == 0);
10380 iemReInitDecoder(pVCpu);
10381 continue;
10382 }
10383 }
10384 }
10385 }
10386 Assert(pVCpu->iem.s.cActiveMappings == 0);
10387 }
10388 else if (pVCpu->iem.s.cActiveMappings > 0)
10389 iemMemRollback(pVCpu);
10390 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10391 break;
10392 }
10393 }
10394#ifdef IEM_WITH_SETJMP
10395 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10396 {
10397 if (pVCpu->iem.s.cActiveMappings > 0)
10398 iemMemRollback(pVCpu);
10399# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10400 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10401# endif
10402 pVCpu->iem.s.cLongJumps++;
10403 }
10404 IEM_CATCH_LONGJMP_END(pVCpu);
10405#endif
10406
10407 /*
10408 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10409 */
10410 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10411 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10412 }
10413 else
10414 {
10415 if (pVCpu->iem.s.cActiveMappings > 0)
10416 iemMemRollback(pVCpu);
10417
10418#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10419 /*
10420 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10421 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10422 */
10423 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10424#endif
10425 }
10426
10427 /*
10428 * Maybe re-enter raw-mode and log.
10429 */
10430 if (rcStrict != VINF_SUCCESS)
10431 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10432 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10433 if (pcInstructions)
10434 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
10435 return rcStrict;
10436}
10437
10438
10439/**
10440 * Interface used by EMExecuteExec, does exit statistics and limits.
10441 *
10442 * @returns Strict VBox status code.
10443 * @param pVCpu The cross context virtual CPU structure.
10444 * @param fWillExit To be defined.
10445 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10446 * @param cMaxInstructions Maximum number of instructions to execute.
10447 * @param cMaxInstructionsWithoutExits
10448 * The max number of instructions without exits.
10449 * @param pStats Where to return statistics.
10450 */
10451VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10452 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10453{
10454 NOREF(fWillExit); /** @todo define flexible exit crits */
10455
10456 /*
10457 * Initialize return stats.
10458 */
10459 pStats->cInstructions = 0;
10460 pStats->cExits = 0;
10461 pStats->cMaxExitDistance = 0;
10462 pStats->cReserved = 0;
10463
10464 /*
10465 * Initial decoder init w/ prefetch, then setup setjmp.
10466 */
10467 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10468 if (rcStrict == VINF_SUCCESS)
10469 {
10470#ifdef IEM_WITH_SETJMP
10471 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
10472 IEM_TRY_SETJMP(pVCpu, rcStrict)
10473#endif
10474 {
10475#ifdef IN_RING0
10476 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10477#endif
10478 uint32_t cInstructionSinceLastExit = 0;
10479
10480 /*
10481             * The run loop.  The caller specified limits (cMaxInstructions & co) are enforced below.
10482 */
10483 PVM pVM = pVCpu->CTX_SUFF(pVM);
10484 for (;;)
10485 {
10486 /*
10487 * Log the state.
10488 */
10489#ifdef LOG_ENABLED
10490 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10491#endif
10492
10493 /*
10494 * Do the decoding and emulation.
10495 */
10496 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10497
10498 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10499 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10500
10501 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10502 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10503 {
10504 pStats->cExits += 1;
10505 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10506 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10507 cInstructionSinceLastExit = 0;
10508 }
10509
10510 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10511 {
10512 Assert(pVCpu->iem.s.cActiveMappings == 0);
10513 pVCpu->iem.s.cInstructions++;
10514 pStats->cInstructions++;
10515 cInstructionSinceLastExit++;
10516
10517#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10518 /* Perform any VMX nested-guest instruction boundary actions. */
10519 uint64_t fCpu = pVCpu->fLocalForcedActions;
10520 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10521 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10522 { /* likely */ }
10523 else
10524 {
10525 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10526 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10527 fCpu = pVCpu->fLocalForcedActions;
10528 else
10529 {
10530 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10531 break;
10532 }
10533 }
10534#endif
10535 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10536 {
10537#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10538 uint64_t fCpu = pVCpu->fLocalForcedActions;
10539#endif
10540 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10541 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10542 | VMCPU_FF_TLB_FLUSH
10543 | VMCPU_FF_UNHALT );
10544 if (RT_LIKELY( ( ( !fCpu
10545 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10546 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10547 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10548 || pStats->cInstructions < cMinInstructions))
10549 {
10550 if (pStats->cInstructions < cMaxInstructions)
10551 {
10552 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10553 {
10554#ifdef IN_RING0
10555 if ( !fCheckPreemptionPending
10556 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10557#endif
10558 {
10559 Assert(pVCpu->iem.s.cActiveMappings == 0);
10560 iemReInitDecoder(pVCpu);
10561 continue;
10562 }
10563#ifdef IN_RING0
10564 rcStrict = VINF_EM_RAW_INTERRUPT;
10565 break;
10566#endif
10567 }
10568 }
10569 }
10570 Assert(!(fCpu & VMCPU_FF_IEM));
10571 }
10572 Assert(pVCpu->iem.s.cActiveMappings == 0);
10573 }
10574 else if (pVCpu->iem.s.cActiveMappings > 0)
10575 iemMemRollback(pVCpu);
10576 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10577 break;
10578 }
10579 }
10580#ifdef IEM_WITH_SETJMP
10581 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10582 {
10583 if (pVCpu->iem.s.cActiveMappings > 0)
10584 iemMemRollback(pVCpu);
10585 pVCpu->iem.s.cLongJumps++;
10586 }
10587 IEM_CATCH_LONGJMP_END(pVCpu);
10588#endif
10589
10590 /*
10591 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10592 */
10593 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10594 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10595 }
10596 else
10597 {
10598 if (pVCpu->iem.s.cActiveMappings > 0)
10599 iemMemRollback(pVCpu);
10600
10601#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10602 /*
10603 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10604 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10605 */
10606 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10607#endif
10608 }
10609
10610 /*
10611 * Maybe re-enter raw-mode and log.
10612 */
10613 if (rcStrict != VINF_SUCCESS)
10614 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10615 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10616 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10617 return rcStrict;
10618}
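
/*
 * A minimal usage sketch for IEMExecForExits.  The wrapper name and the
 * limit values are hypothetical, and the statistics structure is assumed to
 * be named IEMEXECFOREXITSTATS (matching the PIEMEXECFOREXITSTATS typedef);
 * it only illustrates how the returned statistics are consumed on the EMT
 * that owns pVCpu.
 */
#if 0 /* illustrative sketch */
static VBOXSTRICTRC emR3SampleExecForExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 16 /*cMinInstructions*/,
                                            4096 /*cMaxInstructions*/, 512 /*cMaxInstructionsWithoutExits*/,
                                            &Stats);
    LogFlow(("Sample: ins=%u exits=%u maxdist=%u -> %Rrc\n",
             Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif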
10619
10620
10621/**
10622 * Injects a trap, fault, abort, software interrupt or external interrupt.
10623 *
10624 * The parameter list matches TRPMQueryTrapAll pretty closely.
10625 *
10626 * @returns Strict VBox status code.
10627 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10628 * @param u8TrapNo The trap number.
10629 * @param enmType What type is it (trap/fault/abort), software
10630 * interrupt or hardware interrupt.
10631 * @param uErrCode The error code if applicable.
10632 * @param uCr2 The CR2 value if applicable.
10633 * @param cbInstr The instruction length (only relevant for
10634 * software interrupts).
10635 */
10636VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10637 uint8_t cbInstr)
10638{
10639 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
10640#ifdef DBGFTRACE_ENABLED
10641 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10642 u8TrapNo, enmType, uErrCode, uCr2);
10643#endif
10644
10645 uint32_t fFlags;
10646 switch (enmType)
10647 {
10648 case TRPM_HARDWARE_INT:
10649 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10650 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10651 uErrCode = uCr2 = 0;
10652 break;
10653
10654 case TRPM_SOFTWARE_INT:
10655 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10656 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10657 uErrCode = uCr2 = 0;
10658 break;
10659
10660 case TRPM_TRAP:
10661 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10662 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10663 if (u8TrapNo == X86_XCPT_PF)
10664 fFlags |= IEM_XCPT_FLAGS_CR2;
10665 switch (u8TrapNo)
10666 {
10667 case X86_XCPT_DF:
10668 case X86_XCPT_TS:
10669 case X86_XCPT_NP:
10670 case X86_XCPT_SS:
10671 case X86_XCPT_PF:
10672 case X86_XCPT_AC:
10673 case X86_XCPT_GP:
10674 fFlags |= IEM_XCPT_FLAGS_ERR;
10675 break;
10676 }
10677 break;
10678
10679 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10680 }
10681
10682 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10683
10684 if (pVCpu->iem.s.cActiveMappings > 0)
10685 iemMemRollback(pVCpu);
10686
10687 return rcStrict;
10688}
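
/*
 * A minimal sketch of injecting events with IEMInjectTrap.  The vector,
 * error code and CR2 below are made-up placeholders; a real caller takes
 * them from TRPM or the nested-guest state (see IEMInjectTrpmEvent below).
 */
#if 0 /* illustrative sketch */
static VBOXSTRICTRC emR3SampleInjectEvents(PVMCPUCC pVCpu)
{
    /* External hardware interrupt vector 0x20; error code and CR2 are ignored for this type. */
    VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, 0x20, TRPM_HARDWARE_INT, 0, 0, 0 /*cbInstr*/);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* Page fault with a present+write error code and the faulting address in CR2. */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, X86_TRAP_PF_P | X86_TRAP_PF_RW,
                         UINT64_C(0x00007fffdeadb000), 0 /*cbInstr*/);
}
#endif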
10689
10690
10691/**
10692 * Injects the active TRPM event.
10693 *
10694 * @returns Strict VBox status code.
10695 * @param pVCpu The cross context virtual CPU structure.
10696 */
10697VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10698{
10699#ifndef IEM_IMPLEMENTS_TASKSWITCH
10700 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10701#else
10702 uint8_t u8TrapNo;
10703 TRPMEVENT enmType;
10704 uint32_t uErrCode;
10705 RTGCUINTPTR uCr2;
10706 uint8_t cbInstr;
10707 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10708 if (RT_FAILURE(rc))
10709 return rc;
10710
10711 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10712 * ICEBP \#DB injection as a special case. */
10713 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10714#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10715 if (rcStrict == VINF_SVM_VMEXIT)
10716 rcStrict = VINF_SUCCESS;
10717#endif
10718#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10719 if (rcStrict == VINF_VMX_VMEXIT)
10720 rcStrict = VINF_SUCCESS;
10721#endif
10722 /** @todo Are there any other codes that imply the event was successfully
10723 * delivered to the guest? See @bugref{6607}. */
10724 if ( rcStrict == VINF_SUCCESS
10725 || rcStrict == VINF_IEM_RAISED_XCPT)
10726 TRPMResetTrap(pVCpu);
10727
10728 return rcStrict;
10729#endif
10730}
10731
10732
10733VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10734{
10735 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10736 return VERR_NOT_IMPLEMENTED;
10737}
10738
10739
10740VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10741{
10742 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10743 return VERR_NOT_IMPLEMENTED;
10744}
10745
10746
10747/**
10748 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10749 *
10750 * This API ASSUMES that the caller has already verified that the guest code is
10751 * allowed to access the I/O port. (The I/O port is in the DX register in the
10752 * guest state.)
10753 *
10754 * @returns Strict VBox status code.
10755 * @param pVCpu The cross context virtual CPU structure.
10756 * @param cbValue The size of the I/O port access (1, 2, or 4).
10757 * @param enmAddrMode The addressing mode.
10758 * @param fRepPrefix Indicates whether a repeat prefix is used
10759 * (doesn't matter which for this instruction).
10760 * @param cbInstr The instruction length in bytes.
10761 * @param iEffSeg The effective segment register number.
10762 * @param fIoChecked Whether the access to the I/O port has been
10763 * checked or not. It's typically checked in the
10764 * HM scenario.
10765 */
10766VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10767 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10768{
10769 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10770 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10771
10772 /*
10773 * State init.
10774 */
10775 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10776
10777 /*
10778 * Switch orgy for getting to the right handler.
10779 */
10780 VBOXSTRICTRC rcStrict;
10781 if (fRepPrefix)
10782 {
10783 switch (enmAddrMode)
10784 {
10785 case IEMMODE_16BIT:
10786 switch (cbValue)
10787 {
10788 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10789 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10790 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10791 default:
10792 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10793 }
10794 break;
10795
10796 case IEMMODE_32BIT:
10797 switch (cbValue)
10798 {
10799 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10800 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10801 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10802 default:
10803 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10804 }
10805 break;
10806
10807 case IEMMODE_64BIT:
10808 switch (cbValue)
10809 {
10810 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10811 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10812 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10813 default:
10814 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10815 }
10816 break;
10817
10818 default:
10819 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10820 }
10821 }
10822 else
10823 {
10824 switch (enmAddrMode)
10825 {
10826 case IEMMODE_16BIT:
10827 switch (cbValue)
10828 {
10829 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10830 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10831 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10832 default:
10833 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10834 }
10835 break;
10836
10837 case IEMMODE_32BIT:
10838 switch (cbValue)
10839 {
10840 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10841 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10842 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10843 default:
10844 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10845 }
10846 break;
10847
10848 case IEMMODE_64BIT:
10849 switch (cbValue)
10850 {
10851 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10852 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10853 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10854 default:
10855 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10856 }
10857 break;
10858
10859 default:
10860 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10861 }
10862 }
10863
10864 if (pVCpu->iem.s.cActiveMappings)
10865 iemMemRollback(pVCpu);
10866
10867 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10868}
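
/*
 * A minimal sketch of an HM exit handler forwarding a string I/O write
 * (REP OUTSB) to IEM.  The handler name is hypothetical and the operand
 * size, address mode, segment and instruction length are placeholders a
 * real handler would decode from the hardware exit record.  The read side
 * (INS) uses IEMExecStringIoRead the same way, minus the segment.
 */
#if 0 /* illustrative sketch */
static VBOXSTRICTRC hmR0SampleStringIoWriteExit(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT /*enmAddrMode*/, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS /*iEffSeg*/, false /*fIoChecked*/);
}
#endif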
10869
10870
10871/**
10872 * Interface for HM and EM for executing string I/O IN (read) instructions.
10873 *
10874 * This API ASSUMES that the caller has already verified that the guest code is
10875 * allowed to access the I/O port. (The I/O port is in the DX register in the
10876 * guest state.)
10877 *
10878 * @returns Strict VBox status code.
10879 * @param pVCpu The cross context virtual CPU structure.
10880 * @param cbValue The size of the I/O port access (1, 2, or 4).
10881 * @param enmAddrMode The addressing mode.
10882 * @param fRepPrefix Indicates whether a repeat prefix is used
10883 * (doesn't matter which for this instruction).
10884 * @param cbInstr The instruction length in bytes.
10885 * @param fIoChecked Whether the access to the I/O port has been
10886 * checked or not. It's typically checked in the
10887 * HM scenario.
10888 */
10889VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10890 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10891{
10892 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10893
10894 /*
10895 * State init.
10896 */
10897 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10898
10899 /*
10900 * Switch orgy for getting to the right handler.
10901 */
10902 VBOXSTRICTRC rcStrict;
10903 if (fRepPrefix)
10904 {
10905 switch (enmAddrMode)
10906 {
10907 case IEMMODE_16BIT:
10908 switch (cbValue)
10909 {
10910 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10911 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10912 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10913 default:
10914 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10915 }
10916 break;
10917
10918 case IEMMODE_32BIT:
10919 switch (cbValue)
10920 {
10921 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10922 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10923 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10924 default:
10925 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10926 }
10927 break;
10928
10929 case IEMMODE_64BIT:
10930 switch (cbValue)
10931 {
10932 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10933 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10934 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10935 default:
10936 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10937 }
10938 break;
10939
10940 default:
10941 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10942 }
10943 }
10944 else
10945 {
10946 switch (enmAddrMode)
10947 {
10948 case IEMMODE_16BIT:
10949 switch (cbValue)
10950 {
10951 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10952 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10953 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10954 default:
10955 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10956 }
10957 break;
10958
10959 case IEMMODE_32BIT:
10960 switch (cbValue)
10961 {
10962 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10963 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10964 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10965 default:
10966 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10967 }
10968 break;
10969
10970 case IEMMODE_64BIT:
10971 switch (cbValue)
10972 {
10973 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10974 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10975 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10976 default:
10977 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10978 }
10979 break;
10980
10981 default:
10982 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10983 }
10984 }
10985
10986 if ( pVCpu->iem.s.cActiveMappings == 0
10987 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10988 { /* likely */ }
10989 else
10990 {
10991 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10992 iemMemRollback(pVCpu);
10993 }
10994 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10995}
10996
10997
10998/**
10999 * Interface for rawmode to execute an OUT (write) instruction.
11000 *
11001 * @returns Strict VBox status code.
11002 * @param pVCpu The cross context virtual CPU structure.
11003 * @param cbInstr The instruction length in bytes.
11004 * @param u16Port The port to write to.
11005 * @param fImm Whether the port is specified using an immediate operand or
11006 * using the implicit DX register.
11007 * @param cbReg The register size.
11008 *
11009 * @remarks In ring-0 not all of the state needs to be synced in.
11010 */
11011VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
11012{
11013 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11014 Assert(cbReg <= 4 && cbReg != 3);
11015
11016 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11017 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
11018 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
11019 Assert(!pVCpu->iem.s.cActiveMappings);
11020 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11021}
11022
11023
11024/**
11025 * Interface for rawmode to execute an IN (read) instruction.
11026 *
11027 * @returns Strict VBox status code.
11028 * @param pVCpu The cross context virtual CPU structure.
11029 * @param cbInstr The instruction length in bytes.
11030 * @param u16Port The port to read.
11031 * @param fImm Whether the port is specified using an immediate operand or
11032 * using the implicit DX register.
11033 * @param cbReg The register size.
11034 */
11035VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
11036{
11037 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11038 Assert(cbReg <= 4 && cbReg != 3);
11039
11040 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11041 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
11042 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
11043 Assert(!pVCpu->iem.s.cActiveMappings);
11044 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11045}
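
/*
 * A minimal sketch of forwarding already-decoded port I/O to IEM.  The port
 * numbers and operand size are placeholders; fImm=false indicates the guest
 * used the DX-register form of the instruction.
 */
#if 0 /* illustrative sketch */
static VBOXSTRICTRC hmR0SamplePortIoExit(PVMCPUCC pVCpu, uint8_t cbInstr, bool fWrite)
{
    if (fWrite)
        return IEMExecDecodedOut(pVCpu, cbInstr, 0x80 /*u16Port*/, false /*fImm*/, 1 /*cbReg*/);
    return IEMExecDecodedIn(pVCpu, cbInstr, 0x60 /*u16Port*/, false /*fImm*/, 1 /*cbReg*/);
}
#endif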
11046
11047
11048/**
11049 * Interface for HM and EM to write to a CRx register.
11050 *
11051 * @returns Strict VBox status code.
11052 * @param pVCpu The cross context virtual CPU structure.
11053 * @param cbInstr The instruction length in bytes.
11054 * @param iCrReg The control register number (destination).
11055 * @param iGReg The general purpose register number (source).
11056 *
11057 * @remarks In ring-0 not all of the state needs to be synced in.
11058 */
11059VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11060{
11061 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11062 Assert(iCrReg < 16);
11063 Assert(iGReg < 16);
11064
11065 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11066 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11067 Assert(!pVCpu->iem.s.cActiveMappings);
11068 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11069}
11070
11071
11072/**
11073 * Interface for HM and EM to read from a CRx register.
11074 *
11075 * @returns Strict VBox status code.
11076 * @param pVCpu The cross context virtual CPU structure.
11077 * @param cbInstr The instruction length in bytes.
11078 * @param iGReg The general purpose register number (destination).
11079 * @param iCrReg The control register number (source).
11080 *
11081 * @remarks In ring-0 not all of the state needs to be synced in.
11082 */
11083VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11084{
11085 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11086 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
11087 | CPUMCTX_EXTRN_APIC_TPR);
11088 Assert(iCrReg < 16);
11089 Assert(iGReg < 16);
11090
11091 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11092 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11093 Assert(!pVCpu->iem.s.cActiveMappings);
11094 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11095}
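
/*
 * A minimal sketch of a MOV CRx intercept being handed to IEM.  The register
 * numbers are placeholders; a real handler takes them from the VM-exit
 * qualification.
 */
#if 0 /* illustrative sketch */
static VBOXSTRICTRC hmR0SampleMovCRxExit(PVMCPUCC pVCpu, uint8_t cbInstr, bool fWrite)
{
    if (fWrite)
        return IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 4 /*iCrReg*/, X86_GREG_xAX /*iGReg*/);
    return IEMExecDecodedMovCRxRead(pVCpu, cbInstr, X86_GREG_xAX /*iGReg*/, 3 /*iCrReg*/);
}
#endif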
11096
11097
11098/**
11099 * Interface for HM and EM to write to a DRx register.
11100 *
11101 * @returns Strict VBox status code.
11102 * @param pVCpu The cross context virtual CPU structure.
11103 * @param cbInstr The instruction length in bytes.
11104 * @param iDrReg The debug register number (destination).
11105 * @param iGReg The general purpose register number (source).
11106 *
11107 * @remarks In ring-0 not all of the state needs to be synced in.
11108 */
11109VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
11110{
11111 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11112 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11113 Assert(iDrReg < 8);
11114 Assert(iGReg < 16);
11115
11116 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11117 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
11118 Assert(!pVCpu->iem.s.cActiveMappings);
11119 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11120}
11121
11122
11123/**
11124 * Interface for HM and EM to read from a DRx register.
11125 *
11126 * @returns Strict VBox status code.
11127 * @param pVCpu The cross context virtual CPU structure.
11128 * @param cbInstr The instruction length in bytes.
11129 * @param iGReg The general purpose register number (destination).
11130 * @param iDrReg The debug register number (source).
11131 *
11132 * @remarks In ring-0 not all of the state needs to be synced in.
11133 */
11134VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
11135{
11136 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11137 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11138 Assert(iDrReg < 8);
11139 Assert(iGReg < 16);
11140
11141 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11142 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
11143 Assert(!pVCpu->iem.s.cActiveMappings);
11144 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11145}
11146
11147
11148/**
11149 * Interface for HM and EM to clear the CR0[TS] bit.
11150 *
11151 * @returns Strict VBox status code.
11152 * @param pVCpu The cross context virtual CPU structure.
11153 * @param cbInstr The instruction length in bytes.
11154 *
11155 * @remarks In ring-0 not all of the state needs to be synced in.
11156 */
11157VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
11158{
11159 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11160
11161 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11162 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11163 Assert(!pVCpu->iem.s.cActiveMappings);
11164 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11165}
11166
11167
11168/**
11169 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11170 *
11171 * @returns Strict VBox status code.
11172 * @param pVCpu The cross context virtual CPU structure.
11173 * @param cbInstr The instruction length in bytes.
11174 * @param uValue The value to load into CR0.
11175 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
11176 * memory operand. Otherwise pass NIL_RTGCPTR.
11177 *
11178 * @remarks In ring-0 not all of the state needs to be synced in.
11179 */
11180VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
11181{
11182 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11183
11184 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11185 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
11186 Assert(!pVCpu->iem.s.cActiveMappings);
11187 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11188}
11189
11190
11191/**
11192 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11193 *
11194 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11195 *
11196 * @returns Strict VBox status code.
11197 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11198 * @param cbInstr The instruction length in bytes.
11199 * @remarks In ring-0 not all of the state needs to be synced in.
11200 * @thread EMT(pVCpu)
11201 */
11202VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
11203{
11204 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11205
11206 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11207 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11208 Assert(!pVCpu->iem.s.cActiveMappings);
11209 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11210}
11211
11212
11213/**
11214 * Interface for HM and EM to emulate the WBINVD instruction.
11215 *
11216 * @returns Strict VBox status code.
11217 * @param pVCpu The cross context virtual CPU structure.
11218 * @param cbInstr The instruction length in bytes.
11219 *
11220 * @remarks In ring-0 not all of the state needs to be synced in.
11221 */
11222VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11223{
11224 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11225
11226 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11227 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
11228 Assert(!pVCpu->iem.s.cActiveMappings);
11229 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11230}
11231
11232
11233/**
11234 * Interface for HM and EM to emulate the INVD instruction.
11235 *
11236 * @returns Strict VBox status code.
11237 * @param pVCpu The cross context virtual CPU structure.
11238 * @param cbInstr The instruction length in bytes.
11239 *
11240 * @remarks In ring-0 not all of the state needs to be synced in.
11241 */
11242VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11243{
11244 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11245
11246 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11247 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
11248 Assert(!pVCpu->iem.s.cActiveMappings);
11249 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11250}
11251
11252
11253/**
11254 * Interface for HM and EM to emulate the INVLPG instruction.
11255 *
11256 * @returns Strict VBox status code.
11257 * @retval VINF_PGM_SYNC_CR3
11258 *
11259 * @param pVCpu The cross context virtual CPU structure.
11260 * @param cbInstr The instruction length in bytes.
11261 * @param GCPtrPage The effective address of the page to invalidate.
11262 *
11263 * @remarks In ring-0 not all of the state needs to be synced in.
11264 */
11265VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
11266{
11267 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11268
11269 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11270 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
11271 Assert(!pVCpu->iem.s.cActiveMappings);
11272 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11273}
11274
11275
11276/**
11277 * Interface for HM and EM to emulate the INVPCID instruction.
11278 *
11279 * @returns Strict VBox status code.
11280 * @retval VINF_PGM_SYNC_CR3
11281 *
11282 * @param pVCpu The cross context virtual CPU structure.
11283 * @param cbInstr The instruction length in bytes.
11284 * @param iEffSeg The effective segment register.
11285 * @param GCPtrDesc The effective address of the INVPCID descriptor.
11286 * @param uType The invalidation type.
11287 *
11288 * @remarks In ring-0 not all of the state needs to be synced in.
11289 */
11290VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
11291 uint64_t uType)
11292{
11293 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
11294
11295 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11296 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
11297 Assert(!pVCpu->iem.s.cActiveMappings);
11298 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11299}
11300
11301
11302/**
11303 * Interface for HM and EM to emulate the CPUID instruction.
11304 *
11305 * @returns Strict VBox status code.
11306 *
11307 * @param pVCpu The cross context virtual CPU structure.
11308 * @param cbInstr The instruction length in bytes.
11309 *
11310 * @remarks Not all of the state needs to be synced in; the usual plus RAX and RCX.
11311 */
11312VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
11313{
11314 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11315 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
11316
11317 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11318 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
11319 Assert(!pVCpu->iem.s.cActiveMappings);
11320 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11321}
11322
11323
11324/**
11325 * Interface for HM and EM to emulate the RDPMC instruction.
11326 *
11327 * @returns Strict VBox status code.
11328 *
11329 * @param pVCpu The cross context virtual CPU structure.
11330 * @param cbInstr The instruction length in bytes.
11331 *
11332 * @remarks Not all of the state needs to be synced in.
11333 */
11334VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
11335{
11336 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11337 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11338
11339 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11340 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
11341 Assert(!pVCpu->iem.s.cActiveMappings);
11342 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11343}
11344
11345
11346/**
11347 * Interface for HM and EM to emulate the RDTSC instruction.
11348 *
11349 * @returns Strict VBox status code.
11350 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11351 *
11352 * @param pVCpu The cross context virtual CPU structure.
11353 * @param cbInstr The instruction length in bytes.
11354 *
11355 * @remarks Not all of the state needs to be synced in.
11356 */
11357VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
11358{
11359 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11360 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11361
11362 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11363 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
11364 Assert(!pVCpu->iem.s.cActiveMappings);
11365 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11366}
11367
11368
11369/**
11370 * Interface for HM and EM to emulate the RDTSCP instruction.
11371 *
11372 * @returns Strict VBox status code.
11373 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11374 *
11375 * @param pVCpu The cross context virtual CPU structure.
11376 * @param cbInstr The instruction length in bytes.
11377 *
11378 * @remarks Not all of the state needs to be synced in. Recommended
11379 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
11380 */
11381VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
11382{
11383 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11384 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11385
11386 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11387 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11388 Assert(!pVCpu->iem.s.cActiveMappings);
11389 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11390}
11391
11392
11393/**
11394 * Interface for HM and EM to emulate the RDMSR instruction.
11395 *
11396 * @returns Strict VBox status code.
11397 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11398 *
11399 * @param pVCpu The cross context virtual CPU structure.
11400 * @param cbInstr The instruction length in bytes.
11401 *
11402 * @remarks Not all of the state needs to be synced in. Requires RCX and
11403 * (currently) all MSRs.
11404 */
11405VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11406{
11407 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11408 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11409
11410 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11411 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11412 Assert(!pVCpu->iem.s.cActiveMappings);
11413 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11414}
11415
11416
11417/**
11418 * Interface for HM and EM to emulate the WRMSR instruction.
11419 *
11420 * @returns Strict VBox status code.
11421 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11422 *
11423 * @param pVCpu The cross context virtual CPU structure.
11424 * @param cbInstr The instruction length in bytes.
11425 *
11426 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11427 * and (currently) all MSRs.
11428 */
11429VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11430{
11431 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11432 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11433 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11434
11435 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11436 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11437 Assert(!pVCpu->iem.s.cActiveMappings);
11438 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11439}
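
/*
 * A minimal sketch of RDMSR/WRMSR intercepts being forwarded to IEM.  Only
 * the instruction length is passed in; the MSR index and values are taken
 * from the guest's RCX/RAX/RDX, which therefore must be synced in.
 */
#if 0 /* illustrative sketch */
static VBOXSTRICTRC hmR0SampleMsrExit(PVMCPUCC pVCpu, uint8_t cbInstr, bool fWrite)
{
    return fWrite
         ? IEMExecDecodedWrmsr(pVCpu, cbInstr)
         : IEMExecDecodedRdmsr(pVCpu, cbInstr);
}
#endif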
11440
11441
11442/**
11443 * Interface for HM and EM to emulate the MONITOR instruction.
11444 *
11445 * @returns Strict VBox status code.
11446 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11447 *
11448 * @param pVCpu The cross context virtual CPU structure.
11449 * @param cbInstr The instruction length in bytes.
11450 *
11451 * @remarks Not all of the state needs to be synced in.
11452 * @remarks ASSUMES the default DS segment is used and that no segment
11453 * override prefixes are present.
11454 */
11455VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11456{
11457 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11458 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11459
11460 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11461 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11462 Assert(!pVCpu->iem.s.cActiveMappings);
11463 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11464}
11465
11466
11467/**
11468 * Interface for HM and EM to emulate the MWAIT instruction.
11469 *
11470 * @returns Strict VBox status code.
11471 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11472 *
11473 * @param pVCpu The cross context virtual CPU structure.
11474 * @param cbInstr The instruction length in bytes.
11475 *
11476 * @remarks Not all of the state needs to be synced in.
11477 */
11478VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11479{
11480 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11481 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11482
11483 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11484 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11485 Assert(!pVCpu->iem.s.cActiveMappings);
11486 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11487}
11488
11489
11490/**
11491 * Interface for HM and EM to emulate the HLT instruction.
11492 *
11493 * @returns Strict VBox status code.
11494 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11495 *
11496 * @param pVCpu The cross context virtual CPU structure.
11497 * @param cbInstr The instruction length in bytes.
11498 *
11499 * @remarks Not all of the state needs to be synced in.
11500 */
11501VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11502{
11503 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11504
11505 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11506 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11507 Assert(!pVCpu->iem.s.cActiveMappings);
11508 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11509}
11510
11511
11512/**
11513 * Checks if IEM is in the process of delivering an event (interrupt or
11514 * exception).
11515 *
11516 * @returns true if we're in the process of raising an interrupt or exception,
11517 * false otherwise.
11518 * @param pVCpu The cross context virtual CPU structure.
11519 * @param puVector Where to store the vector associated with the
11520 * currently delivered event, optional.
11521 * @param pfFlags Where to store the event delivery flags (see
11522 * IEM_XCPT_FLAGS_XXX), optional.
11523 * @param puErr Where to store the error code associated with the
11524 * event, optional.
11525 * @param puCr2 Where to store the CR2 associated with the event,
11526 * optional.
11527 * @remarks The caller should check the flags to determine if the error code and
11528 * CR2 are valid for the event.
11529 */
11530VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11531{
11532 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11533 if (fRaisingXcpt)
11534 {
11535 if (puVector)
11536 *puVector = pVCpu->iem.s.uCurXcpt;
11537 if (pfFlags)
11538 *pfFlags = pVCpu->iem.s.fCurXcpt;
11539 if (puErr)
11540 *puErr = pVCpu->iem.s.uCurXcptErr;
11541 if (puCr2)
11542 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11543 }
11544 return fRaisingXcpt;
11545}
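
/*
 * A minimal sketch of querying the event IEM is currently delivering, e.g.
 * from a nested-guest intercept path; the function name and the logging are
 * illustrative only.
 */
#if 0 /* illustrative sketch */
static void hmR0SampleLogPendingXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags;
    uint32_t uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("Delivering vector %#x fFlags=%#x uErr=%#x uCr2=%#RX64\n", uVector, fFlags, uErr, uCr2));
}
#endif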
11546
11547#ifdef IN_RING3
11548
11549/**
11550 * Handles the unlikely and probably fatal merge cases.
11551 *
11552 * @returns Merged status code.
11553 * @param rcStrict Current EM status code.
11554 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11555 * with @a rcStrict.
11556 * @param iMemMap The memory mapping index. For error reporting only.
11557 * @param pVCpu The cross context virtual CPU structure of the calling
11558 * thread, for error reporting only.
11559 */
11560DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11561 unsigned iMemMap, PVMCPUCC pVCpu)
11562{
11563 if (RT_FAILURE_NP(rcStrict))
11564 return rcStrict;
11565
11566 if (RT_FAILURE_NP(rcStrictCommit))
11567 return rcStrictCommit;
11568
11569 if (rcStrict == rcStrictCommit)
11570 return rcStrictCommit;
11571
11572 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11573 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11574 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11575 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11576 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11577 return VERR_IOM_FF_STATUS_IPE;
11578}
11579
11580
11581/**
11582 * Helper for IOMR3ProcessForceFlag.
11583 *
11584 * @returns Merged status code.
11585 * @param rcStrict Current EM status code.
11586 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11587 * with @a rcStrict.
11588 * @param iMemMap The memory mapping index. For error reporting only.
11589 * @param pVCpu The cross context virtual CPU structure of the calling
11590 * thread, for error reporting only.
11591 */
11592DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11593{
11594 /* Simple. */
11595 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11596 return rcStrictCommit;
11597
11598 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11599 return rcStrict;
11600
11601 /* EM scheduling status codes. */
11602 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11603 && rcStrict <= VINF_EM_LAST))
11604 {
11605 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11606 && rcStrictCommit <= VINF_EM_LAST))
11607 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11608 }
11609
11610 /* Unlikely */
11611 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11612}
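
/*
 * Illustration of the merge priority implemented above (made-up inputs):
 *   - VINF_SUCCESS vs. any commit status: the commit status wins.
 *   - Two EM scheduling codes (VINF_EM_FIRST..VINF_EM_LAST): the numerically
 *     lower one is returned, which by EM convention is the higher priority.
 *   - A failure status on either side: it is propagated via the slow path.
 */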
11613
11614
11615/**
11616 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11617 *
11618 * @returns Merge between @a rcStrict and what the commit operation returned.
11619 * @param pVM The cross context VM structure.
11620 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11621 * @param rcStrict The status code returned by ring-0 or raw-mode.
11622 */
11623VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11624{
11625 /*
11626 * Reset the pending commit.
11627 */
11628 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11629 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11630 ("%#x %#x %#x\n",
11631 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11632 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11633
11634 /*
11635 * Commit the pending bounce buffers (usually just one).
11636 */
11637 unsigned cBufs = 0;
11638 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11639 while (iMemMap-- > 0)
11640 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11641 {
11642 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11643 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11644 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11645
11646 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11647 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11648 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11649
11650 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11651 {
11652 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11653 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11654 pbBuf,
11655 cbFirst,
11656 PGMACCESSORIGIN_IEM);
11657 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11658 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11659 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11660 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11661 }
11662
11663 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11664 {
11665 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11666 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11667 pbBuf + cbFirst,
11668 cbSecond,
11669 PGMACCESSORIGIN_IEM);
11670 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11671 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11672 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11673 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11674 }
11675 cBufs++;
11676 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11677 }
11678
11679 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11680 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11681 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11682 pVCpu->iem.s.cActiveMappings = 0;
11683 return rcStrict;
11684}
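
/*
 * A minimal sketch of the ring-3 force-flag dispatch that would invoke the
 * routine above; the wrapper name is hypothetical.
 */
#if 0 /* illustrative sketch */
static VBOXSTRICTRC emR3SampleHandleIemForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif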
11685
11686#endif /* IN_RING3 */
11687