VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@100672

Last change on this file since 100672 was 100672, checked in by vboxsync, 20 months ago

VMM/IEM: Some VxD syscall logging. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 472.0 KB
 
1/* $Id: IEMAll.cpp 100672 2023-07-21 00:28:54Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) : Memory writes.
82 * - Level 9 (Log9) : Memory reads.
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
87 * - Level 1 (Log) : Errors and other major events.
88 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
89 * - Level 2 (Log2) : VM exits.
90 *
91 * The syscall logging level assignments:
92 * - Level 1: DOS and BIOS.
93 * - Level 2: Windows 3.x
94 * - Level 3: Linux.
95 */
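/*
 * Illustrative sketch, not part of the original source: how the level
 * assignments above map onto the usual VBox Log macros when this file's
 * LOG_GROUP_IEM is in effect.  The format strings and variable names
 * (u8Vector, uCs, uRip, GCPtrMem, cbMem) are made up for the example.
 */
#if 0 /* example only, never compiled */
    Log(("iemRaiseXcptOrInt: vec=%#x\n", u8Vector));           /* Level 1: exceptions and other major events. */
    Log4(("decode: %04x:%08RX64 add eax, ebx\n", uCs, uRip));  /* Level 4: decoded mnemonics w/ EIP. */
    Log8(("IEM WR %RGv LB %#x\n", GCPtrMem, cbMem));           /* Level 8: memory writes. */
    Log9(("IEM RD %RGv LB %#x\n", GCPtrMem, cbMem));           /* Level 9: memory reads. */
#endif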
96
97/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
98#ifdef _MSC_VER
99# pragma warning(disable:4505)
100#endif
101
102
103/*********************************************************************************************************************************
104* Header Files *
105*********************************************************************************************************************************/
106#define LOG_GROUP LOG_GROUP_IEM
107#define VMCPU_INCL_CPUM_GST_CTX
108#include <VBox/vmm/iem.h>
109#include <VBox/vmm/cpum.h>
110#include <VBox/vmm/apic.h>
111#include <VBox/vmm/pdm.h>
112#include <VBox/vmm/pgm.h>
113#include <VBox/vmm/iom.h>
114#include <VBox/vmm/em.h>
115#include <VBox/vmm/hm.h>
116#include <VBox/vmm/nem.h>
117#include <VBox/vmm/gim.h>
118#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
119# include <VBox/vmm/em.h>
120# include <VBox/vmm/hm_svm.h>
121#endif
122#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
123# include <VBox/vmm/hmvmxinline.h>
124#endif
125#include <VBox/vmm/tm.h>
126#include <VBox/vmm/dbgf.h>
127#include <VBox/vmm/dbgftrace.h>
128#include "IEMInternal.h"
129#include <VBox/vmm/vmcc.h>
130#include <VBox/log.h>
131#include <VBox/err.h>
132#include <VBox/param.h>
133#include <VBox/dis.h>
134#include <iprt/asm-math.h>
135#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
136# include <iprt/asm-amd64-x86.h>
137#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
138# include <iprt/asm-arm.h>
139#endif
140#include <iprt/assert.h>
141#include <iprt/string.h>
142#include <iprt/x86.h>
143
144#include "IEMInline.h"
145
146
147/*********************************************************************************************************************************
148* Structures and Typedefs *
149*********************************************************************************************************************************/
150/**
151 * CPU exception classes.
152 */
153typedef enum IEMXCPTCLASS
154{
155 IEMXCPTCLASS_BENIGN,
156 IEMXCPTCLASS_CONTRIBUTORY,
157 IEMXCPTCLASS_PAGE_FAULT,
158 IEMXCPTCLASS_DOUBLE_FAULT
159} IEMXCPTCLASS;
160
161
162/*********************************************************************************************************************************
163* Global Variables *
164*********************************************************************************************************************************/
165#if defined(IEM_LOG_MEMORY_WRITES)
166/** What IEM just wrote. */
167uint8_t g_abIemWrote[256];
168/** How much IEM just wrote. */
169size_t g_cbIemWrote;
170#endif
171
172
173/*********************************************************************************************************************************
174* Internal Functions *
175*********************************************************************************************************************************/
176static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
177 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
178
179
180/**
181 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
182 * path.
183 *
184 * @returns IEM_F_BRK_PENDING_XXX or zero.
185 * @param pVCpu The cross context virtual CPU structure of the
186 * calling thread.
187 *
188 * @note Don't call directly, use iemCalcExecDbgFlags instead.
189 */
190uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
191{
192 uint32_t fExec = 0;
193
194 /*
195 * Process guest breakpoints.
196 */
197#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
198 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
199 { \
200 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
201 { \
202 case X86_DR7_RW_EO: \
203 fExec |= IEM_F_PENDING_BRK_INSTR; \
204 break; \
205 case X86_DR7_RW_WO: \
206 case X86_DR7_RW_RW: \
207 fExec |= IEM_F_PENDING_BRK_DATA; \
208 break; \
209 case X86_DR7_RW_IO: \
210 fExec |= IEM_F_PENDING_BRK_X86_IO; \
211 break; \
212 } \
213 } \
214 } while (0)
215
216 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
217 if (fGstDr7 & X86_DR7_ENABLED_MASK)
218 {
219 PROCESS_ONE_BP(fGstDr7, 0);
220 PROCESS_ONE_BP(fGstDr7, 1);
221 PROCESS_ONE_BP(fGstDr7, 2);
222 PROCESS_ONE_BP(fGstDr7, 3);
223 }
224
225 /*
226 * Process hypervisor breakpoints.
227 */
228 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
229 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
230 {
231 PROCESS_ONE_BP(fHyperDr7, 0);
232 PROCESS_ONE_BP(fHyperDr7, 1);
233 PROCESS_ONE_BP(fHyperDr7, 2);
234 PROCESS_ONE_BP(fHyperDr7, 3);
235 }
236
237 return fExec;
238}
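/*
 * Illustrative worked example (not from the original source; DR7 layout per
 * the Intel/AMD manuals): a guest DR7 of 0x00000403 has L0 and G0 set (bit 10
 * is the always-one reserved bit) with R/W0 = 0, i.e. an instruction
 * breakpoint, so the first PROCESS_ONE_BP expansion above sets
 * IEM_F_PENDING_BRK_INSTR while the other three breakpoints contribute nothing.
 */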
239
240
241/**
242 * Initializes the decoder state.
243 *
244 * iemReInitDecoder is mostly a copy of this function.
245 *
246 * @param pVCpu The cross context virtual CPU structure of the
247 * calling thread.
248 * @param fExecOpts Optional execution flags:
249 * - IEM_F_BYPASS_HANDLERS
250 * - IEM_F_X86_DISREGARD_LOCK
251 */
252DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
253{
254 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
255 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
256 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
257 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
258 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
259 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
260 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
261 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
262 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
263 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
264
265 /* Execution state: */
266 uint32_t fExec;
267 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
268
269 /* Decoder state: */
270 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
271 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
272 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
273 {
274 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
275 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
276 }
277 else
278 {
279 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
280 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
281 }
282 pVCpu->iem.s.fPrefixes = 0;
283 pVCpu->iem.s.uRexReg = 0;
284 pVCpu->iem.s.uRexB = 0;
285 pVCpu->iem.s.uRexIndex = 0;
286 pVCpu->iem.s.idxPrefix = 0;
287 pVCpu->iem.s.uVex3rdReg = 0;
288 pVCpu->iem.s.uVexLength = 0;
289 pVCpu->iem.s.fEvexStuff = 0;
290 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
291#ifdef IEM_WITH_CODE_TLB
292 pVCpu->iem.s.pbInstrBuf = NULL;
293 pVCpu->iem.s.offInstrNextByte = 0;
294 pVCpu->iem.s.offCurInstrStart = 0;
295# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
296 pVCpu->iem.s.offOpcode = 0;
297# endif
298# ifdef VBOX_STRICT
299 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
300 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
301 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
302 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
303# endif
304#else
305 pVCpu->iem.s.offOpcode = 0;
306 pVCpu->iem.s.cbOpcode = 0;
307#endif
308 pVCpu->iem.s.offModRm = 0;
309 pVCpu->iem.s.cActiveMappings = 0;
310 pVCpu->iem.s.iNextMapping = 0;
311 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
312
313#ifdef DBGFTRACE_ENABLED
314 switch (IEM_GET_CPU_MODE(pVCpu))
315 {
316 case IEMMODE_64BIT:
317 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
318 break;
319 case IEMMODE_32BIT:
320 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
321 break;
322 case IEMMODE_16BIT:
323 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
324 break;
325 }
326#endif
327}
328
329
330/**
331 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
332 *
333 * This is mostly a copy of iemInitDecoder.
334 *
335 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
336 */
337DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
338{
339 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
340 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
341 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
342 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
343 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
344 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
345 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
346 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
347 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
348
349 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
350 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
351 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
352
353 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
354 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
355 pVCpu->iem.s.enmEffAddrMode = enmMode;
356 if (enmMode != IEMMODE_64BIT)
357 {
358 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
359 pVCpu->iem.s.enmEffOpSize = enmMode;
360 }
361 else
362 {
363 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
364 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
365 }
366 pVCpu->iem.s.fPrefixes = 0;
367 pVCpu->iem.s.uRexReg = 0;
368 pVCpu->iem.s.uRexB = 0;
369 pVCpu->iem.s.uRexIndex = 0;
370 pVCpu->iem.s.idxPrefix = 0;
371 pVCpu->iem.s.uVex3rdReg = 0;
372 pVCpu->iem.s.uVexLength = 0;
373 pVCpu->iem.s.fEvexStuff = 0;
374 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
375#ifdef IEM_WITH_CODE_TLB
376 if (pVCpu->iem.s.pbInstrBuf)
377 {
378 uint64_t off = (enmMode == IEMMODE_64BIT
379 ? pVCpu->cpum.GstCtx.rip
380 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
381 - pVCpu->iem.s.uInstrBufPc;
382 if (off < pVCpu->iem.s.cbInstrBufTotal)
383 {
384 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
385 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
386 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
387 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
388 else
389 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
390 }
391 else
392 {
393 pVCpu->iem.s.pbInstrBuf = NULL;
394 pVCpu->iem.s.offInstrNextByte = 0;
395 pVCpu->iem.s.offCurInstrStart = 0;
396 pVCpu->iem.s.cbInstrBuf = 0;
397 pVCpu->iem.s.cbInstrBufTotal = 0;
398 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
399 }
400 }
401 else
402 {
403 pVCpu->iem.s.offInstrNextByte = 0;
404 pVCpu->iem.s.offCurInstrStart = 0;
405 pVCpu->iem.s.cbInstrBuf = 0;
406 pVCpu->iem.s.cbInstrBufTotal = 0;
407# ifdef VBOX_STRICT
408 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
409# endif
410 }
411# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
412 pVCpu->iem.s.offOpcode = 0;
413# endif
414#else /* !IEM_WITH_CODE_TLB */
415 pVCpu->iem.s.cbOpcode = 0;
416 pVCpu->iem.s.offOpcode = 0;
417#endif /* !IEM_WITH_CODE_TLB */
418 pVCpu->iem.s.offModRm = 0;
419 Assert(pVCpu->iem.s.cActiveMappings == 0);
420 pVCpu->iem.s.iNextMapping = 0;
421 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
422 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
423
424#ifdef DBGFTRACE_ENABLED
425 switch (enmMode)
426 {
427 case IEMMODE_64BIT:
428 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
429 break;
430 case IEMMODE_32BIT:
431 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
432 break;
433 case IEMMODE_16BIT:
434 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
435 break;
436 }
437#endif
438}
439
440
441
442/**
443 * Prefetches opcodes the first time execution is started.
444 *
445 * @returns Strict VBox status code.
446 * @param pVCpu The cross context virtual CPU structure of the
447 * calling thread.
448 * @param fExecOpts Optional execution flags:
449 * - IEM_F_BYPASS_HANDLERS
450 * - IEM_F_X86_DISREGARD_LOCK
451 */
452static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
453{
454 iemInitDecoder(pVCpu, fExecOpts);
455
456#ifndef IEM_WITH_CODE_TLB
457 /*
458 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
459 *
460 * First translate CS:rIP to a physical address.
461 *
462 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
463 * all relevant bytes from the first page, as it ASSUMES it's only ever
464 * called for dealing with CS.LIM, page crossing and instructions that
465 * are too long.
466 */
467 uint32_t cbToTryRead;
468 RTGCPTR GCPtrPC;
469 if (IEM_IS_64BIT_CODE(pVCpu))
470 {
471 cbToTryRead = GUEST_PAGE_SIZE;
472 GCPtrPC = pVCpu->cpum.GstCtx.rip;
473 if (IEM_IS_CANONICAL(GCPtrPC))
474 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
475 else
476 return iemRaiseGeneralProtectionFault0(pVCpu);
477 }
478 else
479 {
480 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
481 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
482 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
483 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
484 else
485 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
486 if (cbToTryRead) { /* likely */ }
487 else /* overflowed */
488 {
489 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
490 cbToTryRead = UINT32_MAX;
491 }
492 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
493 Assert(GCPtrPC <= UINT32_MAX);
494 }
495
496 PGMPTWALK Walk;
497 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
498 if (RT_SUCCESS(rc))
499 Assert(Walk.fSucceeded); /* probable. */
500 else
501 {
502 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
503# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
504 if (Walk.fFailed & PGM_WALKFAIL_EPT)
505 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
506# endif
507 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
508 }
509 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
510 else
511 {
512 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
513# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
514 if (Walk.fFailed & PGM_WALKFAIL_EPT)
515 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
516# endif
517 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
518 }
519 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
520 else
521 {
522 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
523# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
524 if (Walk.fFailed & PGM_WALKFAIL_EPT)
525 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
526# endif
527 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
528 }
529 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
530 /** @todo Check reserved bits and such stuff. PGM is better at doing
531 * that, so do it when implementing the guest virtual address
532 * TLB... */
533
534 /*
535 * Read the bytes at this address.
536 */
537 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
538 if (cbToTryRead > cbLeftOnPage)
539 cbToTryRead = cbLeftOnPage;
540 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
541 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
542
543 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
544 {
545 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
546 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
547 { /* likely */ }
548 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
549 {
550 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
551 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
552 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
553 }
554 else
555 {
556 Log((RT_SUCCESS(rcStrict)
557 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
558 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
559 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
560 return rcStrict;
561 }
562 }
563 else
564 {
565 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
566 if (RT_SUCCESS(rc))
567 { /* likely */ }
568 else
569 {
570 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
571 GCPtrPC, GCPhys, rc, cbToTryRead));
572 return rc;
573 }
574 }
575 pVCpu->iem.s.cbOpcode = cbToTryRead;
576#endif /* !IEM_WITH_CODE_TLB */
577 return VINF_SUCCESS;
578}
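/*
 * Worked example for the non-64-bit path above (illustrative, not from the
 * original source): with cs.u32Limit = 0xffff and eip = 0xfff0 at most 0x10
 * bytes may be read before hitting the limit; with cs.u32Limit = UINT32_MAX
 * and eip = 0 the "+ 1" wraps to zero, which the overflow branch repairs by
 * using UINT32_MAX.  The linear address is cs.u64Base (truncated to 32 bits)
 * plus eip, computed with 32-bit wraparound.
 */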
579
580
581/**
582 * Invalidates the IEM TLBs.
583 *
584 * This is called internally as well as by PGM when moving GC mappings.
585 *
586 * @param pVCpu The cross context virtual CPU structure of the calling
587 * thread.
588 */
589VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
590{
591#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
592 Log10(("IEMTlbInvalidateAll\n"));
593# ifdef IEM_WITH_CODE_TLB
594 pVCpu->iem.s.cbInstrBufTotal = 0;
595 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
596 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
597 { /* very likely */ }
598 else
599 {
600 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
601 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
602 while (i-- > 0)
603 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
604 }
605# endif
606
607# ifdef IEM_WITH_DATA_TLB
608 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
609 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
610 { /* very likely */ }
611 else
612 {
613 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
614 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
615 while (i-- > 0)
616 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
617 }
618# endif
619#else
620 RT_NOREF(pVCpu);
621#endif
622}
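/*
 * Note summarizing the revision scheme used above (not from the original
 * comments): each TLB entry tag is the page number ORed with the TLB revision
 * current at insertion time, so bumping uTlbRevision by IEMTLB_REVISION_INCR
 * makes every existing tag mismatch without touching the entries.  Only when
 * the revision counter wraps to zero are all tags cleared explicitly.
 */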
623
624
625/**
626 * Invalidates a page in the TLBs.
627 *
628 * @param pVCpu The cross context virtual CPU structure of the calling
629 * thread.
630 * @param GCPtr The address of the page to invalidate
631 * @thread EMT(pVCpu)
632 */
633VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
634{
635#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
636 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
637 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
638 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
639 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
640
641# ifdef IEM_WITH_CODE_TLB
642 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
643 {
644 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
645 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
646 pVCpu->iem.s.cbInstrBufTotal = 0;
647 }
648# endif
649
650# ifdef IEM_WITH_DATA_TLB
651 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
652 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
653# endif
654#else
655 NOREF(pVCpu); NOREF(GCPtr);
656#endif
657}
658
659
660#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
661/**
662 * Invalidates both TLBs in slow fashion following a rollover.
663 *
664 * Worker for IEMTlbInvalidateAllPhysical,
665 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
666 * iemMemMapJmp and others.
667 *
668 * @thread EMT(pVCpu)
669 */
670static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
671{
672 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
673 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
674 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
675
676 unsigned i;
677# ifdef IEM_WITH_CODE_TLB
678 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
679 while (i-- > 0)
680 {
681 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
682 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
683 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
684 }
685# endif
686# ifdef IEM_WITH_DATA_TLB
687 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
688 while (i-- > 0)
689 {
690 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
691 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
692 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
693 }
694# endif
695
696}
697#endif
698
699
700/**
701 * Invalidates the host physical aspects of the IEM TLBs.
702 *
703 * This is called internally as well as by PGM when moving GC mappings.
704 *
705 * @param pVCpu The cross context virtual CPU structure of the calling
706 * thread.
707 * @note Currently not used.
708 */
709VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
710{
711#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
712 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
713 Log10(("IEMTlbInvalidateAllPhysical\n"));
714
715# ifdef IEM_WITH_CODE_TLB
716 pVCpu->iem.s.cbInstrBufTotal = 0;
717# endif
718 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
719 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
720 {
721 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
722 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
723 }
724 else
725 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
726#else
727 NOREF(pVCpu);
728#endif
729}
730
731
732/**
733 * Invalidates the host physical aspects of the IEM TLBs.
734 *
735 * This is called internally as well as by PGM when moving GC mappings.
736 *
737 * @param pVM The cross context VM structure.
738 * @param idCpuCaller The ID of the calling EMT if available to the caller,
739 * otherwise NIL_VMCPUID.
740 *
741 * @remarks Caller holds the PGM lock.
742 */
743VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
744{
745#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
746 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
747 if (pVCpuCaller)
748 VMCPU_ASSERT_EMT(pVCpuCaller);
749 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
750
751 VMCC_FOR_EACH_VMCPU(pVM)
752 {
753# ifdef IEM_WITH_CODE_TLB
754 if (pVCpuCaller == pVCpu)
755 pVCpu->iem.s.cbInstrBufTotal = 0;
756# endif
757
758 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
759 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
760 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
761 { /* likely */}
762 else if (pVCpuCaller == pVCpu)
763 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
764 else
765 {
766 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
767 continue;
768 }
769 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
770 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
771 }
772 VMCC_FOR_EACH_VMCPU_END(pVM);
773
774#else
775 RT_NOREF(pVM, idCpuCaller);
776#endif
777}
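/*
 * Note on the compare-exchange pair above (interpretation, not from the
 * original comments): the cmpxchg only advances another EMT's physical
 * revision if that EMT has not bumped it in the meantime; losing the race
 * appears harmless because it means the target vCPU has already moved to a
 * newer revision on its own.
 */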
778
779
780/**
781 * Flushes the prefetch buffer, light version.
782 */
783void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
784{
785#ifndef IEM_WITH_CODE_TLB
786 pVCpu->iem.s.cbOpcode = cbInstr;
787#else
788 RT_NOREF(pVCpu, cbInstr);
789#endif
790}
791
792
793/**
794 * Flushes the prefetch buffer, heavy version.
795 */
796void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
797{
798#ifndef IEM_WITH_CODE_TLB
799 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
800#elif 1
801 pVCpu->iem.s.pbInstrBuf = NULL;
802 RT_NOREF(cbInstr);
803#else
804 RT_NOREF(pVCpu, cbInstr);
805#endif
806}
807
808
809
810#ifdef IEM_WITH_CODE_TLB
811
812/**
813 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
814 * failure and jumps.
815 *
816 * We end up here for a number of reasons:
817 * - pbInstrBuf isn't yet initialized.
818 * - Advancing beyond the buffer boundary (e.g. cross page).
819 * - Advancing beyond the CS segment limit.
820 * - Fetching from non-mappable page (e.g. MMIO).
821 *
822 * @param pVCpu The cross context virtual CPU structure of the
823 * calling thread.
824 * @param pvDst Where to return the bytes.
825 * @param cbDst Number of bytes to read. A value of zero is
826 * allowed for initializing pbInstrBuf (the
827 * recompiler does this). In this case it is best
828 * to set pbInstrBuf to NULL prior to the call.
829 */
830void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
831{
832# ifdef IN_RING3
833 for (;;)
834 {
835 Assert(cbDst <= 8);
836 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
837
838 /*
839 * We might have a partial buffer match, deal with that first to make the
840 * rest simpler. This is the first part of the cross page/buffer case.
841 */
842 if (pVCpu->iem.s.pbInstrBuf != NULL)
843 {
844 if (offBuf < pVCpu->iem.s.cbInstrBuf)
845 {
846 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
847 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
848 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
849
850 cbDst -= cbCopy;
851 pvDst = (uint8_t *)pvDst + cbCopy;
852 offBuf += cbCopy;
853 pVCpu->iem.s.offInstrNextByte += offBuf;
854 }
855 }
856
857 /*
858 * Check segment limit, figuring how much we're allowed to access at this point.
859 *
860 * We will fault immediately if RIP is past the segment limit / in non-canonical
861 * territory. If we do continue, there are one or more bytes to read before we
862 * end up in trouble and we need to do that first before faulting.
863 */
864 RTGCPTR GCPtrFirst;
865 uint32_t cbMaxRead;
866 if (IEM_IS_64BIT_CODE(pVCpu))
867 {
868 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
869 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
870 { /* likely */ }
871 else
872 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
873 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
874 }
875 else
876 {
877 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
878 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
879 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
880 { /* likely */ }
881 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
882 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
883 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
884 if (cbMaxRead != 0)
885 { /* likely */ }
886 else
887 {
888 /* Overflowed because address is 0 and limit is max. */
889 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
890 cbMaxRead = X86_PAGE_SIZE;
891 }
892 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
893 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
894 if (cbMaxRead2 < cbMaxRead)
895 cbMaxRead = cbMaxRead2;
896 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
897 }
898
899 /*
900 * Get the TLB entry for this piece of code.
901 */
902 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
903 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
904 if (pTlbe->uTag == uTag)
905 {
906 /* likely when executing lots of code, otherwise unlikely */
907# ifdef VBOX_WITH_STATISTICS
908 pVCpu->iem.s.CodeTlb.cTlbHits++;
909# endif
910 }
911 else
912 {
913 pVCpu->iem.s.CodeTlb.cTlbMisses++;
914 PGMPTWALK Walk;
915 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
916 if (RT_FAILURE(rc))
917 {
918#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
919 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
920 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
921#endif
922 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
923 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
924 }
925
926 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
927 Assert(Walk.fSucceeded);
928 pTlbe->uTag = uTag;
929 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
930 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
931 pTlbe->GCPhys = Walk.GCPhys;
932 pTlbe->pbMappingR3 = NULL;
933 }
934
935 /*
936 * Check TLB page table level access flags.
937 */
938 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
939 {
940 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
941 {
942 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
943 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
944 }
945 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
946 {
947 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
948 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
949 }
950 }
951
952 /*
953 * Look up the physical page info if necessary.
954 */
955 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
956 { /* not necessary */ }
957 else
958 {
959 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
960 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
961 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
962 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
963 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
964 { /* likely */ }
965 else
966 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
967 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
968 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
969 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
970 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
971 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
972 }
973
974# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
975 /*
976 * Try do a direct read using the pbMappingR3 pointer.
977 */
978 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
979 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
980 {
981 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
982 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
983 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
984 {
985 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
986 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
987 }
988 else
989 {
990 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
991 if (cbInstr + (uint32_t)cbDst <= 15)
992 {
993 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
994 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
995 }
996 else
997 {
998 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
999 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1000 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1001 }
1002 }
1003 if (cbDst <= cbMaxRead)
1004 {
1005 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1006 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1007 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1008 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1009 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1010 return;
1011 }
1012 pVCpu->iem.s.pbInstrBuf = NULL;
1013
1014 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1015 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1016 }
1017# else
1018# error "refactor as needed"
1019 /*
1020 * If there is no special read handling, we can read a bit more and
1021 * put it in the prefetch buffer.
1022 */
1023 if ( cbDst < cbMaxRead
1024 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1025 {
1026 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1027 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1028 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1029 { /* likely */ }
1030 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1031 {
1032 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1033 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1034 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1035 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1036 }
1037 else
1038 {
1039 Log((RT_SUCCESS(rcStrict)
1040 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1041 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1042 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1043 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1044 }
1045 }
1046# endif
1047 /*
1048 * Special read handling, so only read exactly what's needed.
1049 * This is a highly unlikely scenario.
1050 */
1051 else
1052 {
1053 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1054
1055 /* Check instruction length. */
1056 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1057 if (RT_LIKELY(cbInstr + cbDst <= 15))
1058 { /* likely */ }
1059 else
1060 {
1061 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1062 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1063 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1064 }
1065
1066 /* Do the reading. */
1067 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1068 if (cbToRead > 0)
1069 {
1070 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1071 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1072 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1073 { /* likely */ }
1074 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1075 {
1076 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1077 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1078 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1079 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1080 }
1081 else
1082 {
1083 Log((RT_SUCCESS(rcStrict)
1084 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1085 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1086 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1087 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1088 }
1089 }
1090
1091 /* Update the state and probably return. */
1092 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1093 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1094 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1095 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1096 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE;
1097 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1098 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1099 pVCpu->iem.s.pbInstrBuf = NULL;
1100 if (cbToRead == cbDst)
1101 return;
1102 }
1103
1104 /*
1105 * More to read, loop.
1106 */
1107 cbDst -= cbMaxRead;
1108 pvDst = (uint8_t *)pvDst + cbMaxRead;
1109 }
1110# else /* !IN_RING3 */
1111 RT_NOREF(pvDst, cbDst);
1112 if (pvDst || cbDst)
1113 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1114# endif /* !IN_RING3 */
1115}
1116
1117#else /* !IEM_WITH_CODE_TLB */
1118
1119/**
1120 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate
1121 * exception if it fails.
1122 *
1123 * @returns Strict VBox status code.
1124 * @param pVCpu The cross context virtual CPU structure of the
1125 * calling thread.
1126 * @param cbMin The minimum number of bytes relative to offOpcode
1127 * that must be read.
1128 */
1129VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1130{
1131 /*
1132 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1133 *
1134 * First translate CS:rIP to a physical address.
1135 */
1136 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1137 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1138 uint8_t const cbLeft = cbOpcode - offOpcode;
1139 Assert(cbLeft < cbMin);
1140 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1141
1142 uint32_t cbToTryRead;
1143 RTGCPTR GCPtrNext;
1144 if (IEM_IS_64BIT_CODE(pVCpu))
1145 {
1146 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1147 if (!IEM_IS_CANONICAL(GCPtrNext))
1148 return iemRaiseGeneralProtectionFault0(pVCpu);
1149 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1150 }
1151 else
1152 {
1153 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1154 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1155 GCPtrNext32 += cbOpcode;
1156 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1157 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1158 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1159 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1160 if (!cbToTryRead) /* overflowed */
1161 {
1162 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1163 cbToTryRead = UINT32_MAX;
1164 /** @todo check out wrapping around the code segment. */
1165 }
1166 if (cbToTryRead < cbMin - cbLeft)
1167 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1168 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1169
1170 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1171 if (cbToTryRead > cbLeftOnPage)
1172 cbToTryRead = cbLeftOnPage;
1173 }
1174
1175 /* Restrict to opcode buffer space.
1176
1177 We're making ASSUMPTIONS here based on work done previously in
1178 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1179 be fetched in case of an instruction crossing two pages. */
1180 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1181 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1182 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1183 { /* likely */ }
1184 else
1185 {
1186 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1187 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1188 return iemRaiseGeneralProtectionFault0(pVCpu);
1189 }
1190
1191 PGMPTWALK Walk;
1192 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1193 if (RT_FAILURE(rc))
1194 {
1195 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1196#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1197 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1198 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1199#endif
1200 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1201 }
1202 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1203 {
1204 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1205#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1206 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1207 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1208#endif
1209 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1210 }
1211 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1212 {
1213 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1214#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1215 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1216 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1217#endif
1218 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1219 }
1220 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1221 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1222 /** @todo Check reserved bits and such stuff. PGM is better at doing
1223 * that, so do it when implementing the guest virtual address
1224 * TLB... */
1225
1226 /*
1227 * Read the bytes at this address.
1228 *
1229 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1230 * and since PATM should only patch the start of an instruction there
1231 * should be no need to check again here.
1232 */
1233 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1234 {
1235 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1236 cbToTryRead, PGMACCESSORIGIN_IEM);
1237 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1238 { /* likely */ }
1239 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1240 {
1241 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1242 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1243 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1244 }
1245 else
1246 {
1247 Log((RT_SUCCESS(rcStrict)
1248 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1249 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1250 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1251 return rcStrict;
1252 }
1253 }
1254 else
1255 {
1256 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1257 if (RT_SUCCESS(rc))
1258 { /* likely */ }
1259 else
1260 {
1261 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1262 return rc;
1263 }
1264 }
1265 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1266 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1267
1268 return VINF_SUCCESS;
1269}
1270
1271#endif /* !IEM_WITH_CODE_TLB */
1272#ifndef IEM_WITH_SETJMP
1273
1274/**
1275 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1276 *
1277 * @returns Strict VBox status code.
1278 * @param pVCpu The cross context virtual CPU structure of the
1279 * calling thread.
1280 * @param pb Where to return the opcode byte.
1281 */
1282VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1283{
1284 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1285 if (rcStrict == VINF_SUCCESS)
1286 {
1287 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1288 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1289 pVCpu->iem.s.offOpcode = offOpcode + 1;
1290 }
1291 else
1292 *pb = 0;
1293 return rcStrict;
1294}
1295
1296#else /* IEM_WITH_SETJMP */
1297
1298/**
1299 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1300 *
1301 * @returns The opcode byte.
1302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1303 */
1304uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1305{
1306# ifdef IEM_WITH_CODE_TLB
1307 uint8_t u8;
1308 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1309 return u8;
1310# else
1311 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1312 if (rcStrict == VINF_SUCCESS)
1313 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1314 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1315# endif
1316}
1317
1318#endif /* IEM_WITH_SETJMP */
1319
1320#ifndef IEM_WITH_SETJMP
1321
1322/**
1323 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1324 *
1325 * @returns Strict VBox status code.
1326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1327 * @param pu16 Where to return the opcode word.
1328 */
1329VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1330{
1331 uint8_t u8;
1332 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1333 if (rcStrict == VINF_SUCCESS)
1334 *pu16 = (int8_t)u8;
1335 return rcStrict;
1336}
1337
1338
1339/**
1340 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1341 *
1342 * @returns Strict VBox status code.
1343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1344 * @param pu32 Where to return the opcode dword.
1345 */
1346VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1347{
1348 uint8_t u8;
1349 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1350 if (rcStrict == VINF_SUCCESS)
1351 *pu32 = (int8_t)u8;
1352 return rcStrict;
1353}
1354
1355
1356/**
1357 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1358 *
1359 * @returns Strict VBox status code.
1360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1361 * @param pu64 Where to return the opcode qword.
1362 */
1363VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1364{
1365 uint8_t u8;
1366 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1367 if (rcStrict == VINF_SUCCESS)
1368 *pu64 = (int8_t)u8;
1369 return rcStrict;
1370}
1371
1372#endif /* !IEM_WITH_SETJMP */
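/*
 * Example of the sign extension performed by the S8Sx helpers above
 * (illustrative, not from the original source): an opcode byte of 0xfe is
 * cast to int8_t (-2) and stored into the wider destination, yielding
 * 0xfffe, 0xfffffffe or 0xfffffffffffffffe respectively.
 */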
1373
1374
1375#ifndef IEM_WITH_SETJMP
1376
1377/**
1378 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1379 *
1380 * @returns Strict VBox status code.
1381 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1382 * @param pu16 Where to return the opcode word.
1383 */
1384VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1385{
1386 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1387 if (rcStrict == VINF_SUCCESS)
1388 {
1389 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1390# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1391 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1392# else
1393 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1394# endif
1395 pVCpu->iem.s.offOpcode = offOpcode + 2;
1396 }
1397 else
1398 *pu16 = 0;
1399 return rcStrict;
1400}
1401
1402#else /* IEM_WITH_SETJMP */
1403
1404/**
1405 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
1406 *
1407 * @returns The opcode word.
1408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1409 */
1410uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1411{
1412# ifdef IEM_WITH_CODE_TLB
1413 uint16_t u16;
1414 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1415 return u16;
1416# else
1417 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1418 if (rcStrict == VINF_SUCCESS)
1419 {
1420 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1421 pVCpu->iem.s.offOpcode += 2;
1422# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1423 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1424# else
1425 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1426# endif
1427 }
1428 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1429# endif
1430}
1431
1432#endif /* IEM_WITH_SETJMP */
1433
1434#ifndef IEM_WITH_SETJMP
1435
1436/**
1437 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1438 *
1439 * @returns Strict VBox status code.
1440 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1441 * @param pu32 Where to return the opcode double word.
1442 */
1443VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1444{
1445 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1446 if (rcStrict == VINF_SUCCESS)
1447 {
1448 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1449 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1450 pVCpu->iem.s.offOpcode = offOpcode + 2;
1451 }
1452 else
1453 *pu32 = 0;
1454 return rcStrict;
1455}
1456
1457
1458/**
1459 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1460 *
1461 * @returns Strict VBox status code.
1462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1463 * @param pu64 Where to return the opcode quad word.
1464 */
1465VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1466{
1467 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1468 if (rcStrict == VINF_SUCCESS)
1469 {
1470 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1471 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1472 pVCpu->iem.s.offOpcode = offOpcode + 2;
1473 }
1474 else
1475 *pu64 = 0;
1476 return rcStrict;
1477}
1478
1479#endif /* !IEM_WITH_SETJMP */
1480
1481#ifndef IEM_WITH_SETJMP
1482
1483/**
1484 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1485 *
1486 * @returns Strict VBox status code.
1487 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1488 * @param pu32 Where to return the opcode dword.
1489 */
1490VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1491{
1492 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1493 if (rcStrict == VINF_SUCCESS)
1494 {
1495 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1496# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1497 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1498# else
1499 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1500 pVCpu->iem.s.abOpcode[offOpcode + 1],
1501 pVCpu->iem.s.abOpcode[offOpcode + 2],
1502 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1503# endif
1504 pVCpu->iem.s.offOpcode = offOpcode + 4;
1505 }
1506 else
1507 *pu32 = 0;
1508 return rcStrict;
1509}
1510
1511#else /* IEM_WITH_SETJMP */
1512
1513/**
1514 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1515 *
1516 * @returns The opcode dword.
1517 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1518 */
1519uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1520{
1521# ifdef IEM_WITH_CODE_TLB
1522 uint32_t u32;
1523 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1524 return u32;
1525# else
1526 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1527 if (rcStrict == VINF_SUCCESS)
1528 {
1529 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1530 pVCpu->iem.s.offOpcode = offOpcode + 4;
1531# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1532 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1533# else
1534 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1535 pVCpu->iem.s.abOpcode[offOpcode + 1],
1536 pVCpu->iem.s.abOpcode[offOpcode + 2],
1537 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1538# endif
1539 }
1540 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1541# endif
1542}
1543
1544#endif /* IEM_WITH_SETJMP */
1545
1546#ifndef IEM_WITH_SETJMP
1547
1548/**
1549 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1550 *
1551 * @returns Strict VBox status code.
1552 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1553 * @param pu64 Where to return the opcode quad word.
1554 */
1555VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1556{
1557 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1558 if (rcStrict == VINF_SUCCESS)
1559 {
1560 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1561 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1562 pVCpu->iem.s.abOpcode[offOpcode + 1],
1563 pVCpu->iem.s.abOpcode[offOpcode + 2],
1564 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1565 pVCpu->iem.s.offOpcode = offOpcode + 4;
1566 }
1567 else
1568 *pu64 = 0;
1569 return rcStrict;
1570}
1571
1572
1573/**
1574 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1575 *
1576 * @returns Strict VBox status code.
1577 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1578 * @param pu64 Where to return the opcode dword, sign extended to a quad word.
1579 */
1580VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1581{
1582 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1583 if (rcStrict == VINF_SUCCESS)
1584 {
1585 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1586 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1587 pVCpu->iem.s.abOpcode[offOpcode + 1],
1588 pVCpu->iem.s.abOpcode[offOpcode + 2],
1589 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1590 pVCpu->iem.s.offOpcode = offOpcode + 4;
1591 }
1592 else
1593 *pu64 = 0;
1594 return rcStrict;
1595}
1596
1597#endif /* !IEM_WITH_SETJMP */
1598
1599#ifndef IEM_WITH_SETJMP
1600
1601/**
1602 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1603 *
1604 * @returns Strict VBox status code.
1605 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1606 * @param pu64 Where to return the opcode qword.
1607 */
1608VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1609{
1610 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1611 if (rcStrict == VINF_SUCCESS)
1612 {
1613 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1614# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1615 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1616# else
1617 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1618 pVCpu->iem.s.abOpcode[offOpcode + 1],
1619 pVCpu->iem.s.abOpcode[offOpcode + 2],
1620 pVCpu->iem.s.abOpcode[offOpcode + 3],
1621 pVCpu->iem.s.abOpcode[offOpcode + 4],
1622 pVCpu->iem.s.abOpcode[offOpcode + 5],
1623 pVCpu->iem.s.abOpcode[offOpcode + 6],
1624 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1625# endif
1626 pVCpu->iem.s.offOpcode = offOpcode + 8;
1627 }
1628 else
1629 *pu64 = 0;
1630 return rcStrict;
1631}
1632
1633#else /* IEM_WITH_SETJMP */
1634
1635/**
1636 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1637 *
1638 * @returns The opcode qword.
1639 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1640 */
1641uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1642{
1643# ifdef IEM_WITH_CODE_TLB
1644 uint64_t u64;
1645 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1646 return u64;
1647# else
1648 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1649 if (rcStrict == VINF_SUCCESS)
1650 {
1651 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1652 pVCpu->iem.s.offOpcode = offOpcode + 8;
1653# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1654 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1655# else
1656 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1657 pVCpu->iem.s.abOpcode[offOpcode + 1],
1658 pVCpu->iem.s.abOpcode[offOpcode + 2],
1659 pVCpu->iem.s.abOpcode[offOpcode + 3],
1660 pVCpu->iem.s.abOpcode[offOpcode + 4],
1661 pVCpu->iem.s.abOpcode[offOpcode + 5],
1662 pVCpu->iem.s.abOpcode[offOpcode + 6],
1663 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1664# endif
1665 }
1666 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1667# endif
1668}
1669
1670#endif /* IEM_WITH_SETJMP */
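
/*
 * Note on the two flavours above: the RT_NOEXCEPT variants report failures
 * through a strict status code which the caller must check and propagate,
 * and they zero the output on failure so callers never see garbage.  The
 * ...SlowJmp variants used in IEM_WITH_SETJMP builds return the value
 * directly and bail out via IEM_DO_LONGJMP instead.  A minimal sketch of the
 * non-setjmp calling pattern (illustrative only, the decoder normally hides
 * this behind wrapper macros):
 *
 *      uint32_t u32Imm = 0;
 *      VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32Slow(pVCpu, &u32Imm);
 *      if (rcStrict2 != VINF_SUCCESS)
 *          return rcStrict2;
 */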
1671
1672
1673
1674/** @name Misc Worker Functions.
1675 * @{
1676 */
1677
1678/**
1679 * Gets the exception class for the specified exception vector.
1680 *
1681 * @returns The class of the specified exception.
1682 * @param uVector The exception vector.
1683 */
1684static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1685{
1686 Assert(uVector <= X86_XCPT_LAST);
1687 switch (uVector)
1688 {
1689 case X86_XCPT_DE:
1690 case X86_XCPT_TS:
1691 case X86_XCPT_NP:
1692 case X86_XCPT_SS:
1693 case X86_XCPT_GP:
1694 case X86_XCPT_SX: /* AMD only */
1695 return IEMXCPTCLASS_CONTRIBUTORY;
1696
1697 case X86_XCPT_PF:
1698 case X86_XCPT_VE: /* Intel only */
1699 return IEMXCPTCLASS_PAGE_FAULT;
1700
1701 case X86_XCPT_DF:
1702 return IEMXCPTCLASS_DOUBLE_FAULT;
1703 }
1704 return IEMXCPTCLASS_BENIGN;
1705}
1706
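/*
 * Quick reference for how IEMEvaluateRecursiveXcpt (below) combines these
 * classes when a second exception is raised while delivering a first one;
 * this mirrors the double fault rules in the Intel/AMD manuals:
 *
 *      previous \ current    benign    contributory    page fault
 *      benign                deliver   deliver         deliver
 *      contributory          deliver   #DF             deliver
 *      page fault            deliver   #DF             #DF
 *      double fault          deliver   triple fault    triple fault
 *
 * "deliver" means the second exception is simply delivered
 * (IEMXCPTRAISE_CURRENT_XCPT); recursive #AC is treated as a CPU hang.
 */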
1707
1708/**
1709 * Evaluates how to handle an exception caused during delivery of another event
1710 * (exception / interrupt).
1711 *
1712 * @returns How to handle the recursive exception.
1713 * @param pVCpu The cross context virtual CPU structure of the
1714 * calling thread.
1715 * @param fPrevFlags The flags of the previous event.
1716 * @param uPrevVector The vector of the previous event.
1717 * @param fCurFlags The flags of the current exception.
1718 * @param uCurVector The vector of the current exception.
1719 * @param pfXcptRaiseInfo Where to store additional information about the
1720 * exception condition. Optional.
1721 */
1722VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1723 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1724{
1725 /*
1726 * Only CPU exceptions can be raised while delivering other events; software interrupt
1727 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1728 */
1729 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1730 Assert(pVCpu); RT_NOREF(pVCpu);
1731 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1732
1733 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1734 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1735 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1736 {
1737 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1738 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1739 {
1740 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1741 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1742 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1743 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1744 {
1745 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1746 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1747 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1748 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1749 uCurVector, pVCpu->cpum.GstCtx.cr2));
1750 }
1751 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1752 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1753 {
1754 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1755 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1756 }
1757 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1758 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1759 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1760 {
1761 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1762 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1763 }
1764 }
1765 else
1766 {
1767 if (uPrevVector == X86_XCPT_NMI)
1768 {
1769 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1770 if (uCurVector == X86_XCPT_PF)
1771 {
1772 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1773 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1774 }
1775 }
1776 else if ( uPrevVector == X86_XCPT_AC
1777 && uCurVector == X86_XCPT_AC)
1778 {
1779 enmRaise = IEMXCPTRAISE_CPU_HANG;
1780 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1781 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1782 }
1783 }
1784 }
1785 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1786 {
1787 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1788 if (uCurVector == X86_XCPT_PF)
1789 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1790 }
1791 else
1792 {
1793 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1794 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1795 }
1796
1797 if (pfXcptRaiseInfo)
1798 *pfXcptRaiseInfo = fRaiseInfo;
1799 return enmRaise;
1800}
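
/*
 * A sketch of how a caller typically acts on the verdict (illustrative only,
 * see the actual users of this API for the real handling):
 *
 *      IEMXCPTRAISEINFO fRaiseInfo;
 *      IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevFlags, uPrevVector,
 *                                                       fCurFlags, uCurVector, &fRaiseInfo);
 *      switch (enmRaise)
 *      {
 *          case IEMXCPTRAISE_CURRENT_XCPT: deliver the second event as-is.
 *          case IEMXCPTRAISE_DOUBLE_FAULT: raise #DF (zero error code) instead.
 *          case IEMXCPTRAISE_TRIPLE_FAULT: head for shutdown / VINF_EM_TRIPLE_FAULT.
 *          case IEMXCPTRAISE_CPU_HANG:     the guest wedged itself (recursive #AC).
 *          default:                        treat as an internal error.
 *      }
 */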
1801
1802
1803/**
1804 * Enters the CPU shutdown state initiated by a triple fault or other
1805 * unrecoverable conditions.
1806 *
1807 * @returns Strict VBox status code.
1808 * @param pVCpu The cross context virtual CPU structure of the
1809 * calling thread.
1810 */
1811static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1812{
1813 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1814 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1815
1816 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1817 {
1818 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1819 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1820 }
1821
1822 RT_NOREF(pVCpu);
1823 return VINF_EM_TRIPLE_FAULT;
1824}
1825
1826
1827/**
1828 * Validates a new SS segment.
1829 *
1830 * @returns VBox strict status code.
1831 * @param pVCpu The cross context virtual CPU structure of the
1832 * calling thread.
1833 * @param NewSS The new SS selector.
1834 * @param uCpl The CPL to load the stack for.
1835 * @param pDesc Where to return the descriptor.
1836 */
1837static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1838{
1839 /* Null selectors are not allowed (we're not called for dispatching
1840 interrupts with SS=0 in long mode). */
1841 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1842 {
1843 Log(("iemMiscValidateNewSS: %#x - null selector -> #TS(0)\n", NewSS));
1844 return iemRaiseTaskSwitchFault0(pVCpu);
1845 }
1846
1847 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1848 if ((NewSS & X86_SEL_RPL) != uCpl)
1849 {
1850 Log(("iemMiscValidateNewSS: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
1851 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1852 }
1853
1854 /*
1855 * Read the descriptor.
1856 */
1857 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1858 if (rcStrict != VINF_SUCCESS)
1859 return rcStrict;
1860
1861 /*
1862 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1863 */
1864 if (!pDesc->Legacy.Gen.u1DescType)
1865 {
1866 Log(("iemMiscValidateNewSS: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1867 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1868 }
1869
1870 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1871 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1872 {
1873 Log(("iemMiscValidateNewSS: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1874 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1875 }
1876 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1877 {
1878 Log(("iemMiscValidateNewSS: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1879 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1880 }
1881
1882 /* Is it there? */
1883 /** @todo testcase: Is this checked before the canonical / limit check below? */
1884 if (!pDesc->Legacy.Gen.u1Present)
1885 {
1886 Log(("iemMiscValidateNewSS: %#x - segment not present -> #NP\n", NewSS));
1887 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1888 }
1889
1890 return VINF_SUCCESS;
1891}
1892
1893/** @} */
1894
1895
1896/** @name Raising Exceptions.
1897 *
1898 * @{
1899 */
1900
1901
1902/**
1903 * Loads the specified stack far pointer from the TSS.
1904 *
1905 * @returns VBox strict status code.
1906 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1907 * @param uCpl The CPL to load the stack for.
1908 * @param pSelSS Where to return the new stack segment.
1909 * @param puEsp Where to return the new stack pointer.
1910 */
1911static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1912{
1913 VBOXSTRICTRC rcStrict;
1914 Assert(uCpl < 4);
1915
1916 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1917 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1918 {
1919 /*
1920 * 16-bit TSS (X86TSS16).
1921 */
1922 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1923 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1924 {
1925 uint32_t off = uCpl * 4 + 2;
1926 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1927 {
1928 /** @todo check actual access pattern here. */
1929 uint32_t u32Tmp = 0; /* gcc maybe... */
1930 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1931 if (rcStrict == VINF_SUCCESS)
1932 {
1933 *puEsp = RT_LOWORD(u32Tmp);
1934 *pSelSS = RT_HIWORD(u32Tmp);
1935 return VINF_SUCCESS;
1936 }
1937 }
1938 else
1939 {
1940 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1941 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1942 }
1943 break;
1944 }
1945
1946 /*
1947 * 32-bit TSS (X86TSS32).
1948 */
1949 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1950 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1951 {
1952 uint32_t off = uCpl * 8 + 4;
1953 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1954 {
1955/** @todo check actual access pattern here. */
1956 uint64_t u64Tmp;
1957 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1958 if (rcStrict == VINF_SUCCESS)
1959 {
1960 *puEsp = u64Tmp & UINT32_MAX;
1961 *pSelSS = (RTSEL)(u64Tmp >> 32);
1962 return VINF_SUCCESS;
1963 }
1964 }
1965 else
1966 {
1967 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1968 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1969 }
1970 break;
1971 }
1972
1973 default:
1974 AssertFailed();
1975 rcStrict = VERR_IEM_IPE_4;
1976 break;
1977 }
1978
1979 *puEsp = 0; /* make gcc happy */
1980 *pSelSS = 0; /* make gcc happy */
1981 return rcStrict;
1982}
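
/*
 * Layout reminder for the lookup above: a 16-bit TSS stores the SP0:SS0 thru
 * SP2:SS2 pairs as 2+2 bytes starting at offset 2 (hence off = uCpl * 4 + 2),
 * while a 32-bit TSS stores ESP0:SS0 thru ESP2:SS2 as 4+4 bytes starting at
 * offset 4 (hence off = uCpl * 8 + 4).  For example, loading the ring-1 stack
 * from a 32-bit TSS reads 8 bytes at TR.base + 12, giving ESP1 in the low
 * dword and SS1 in the low word of the high dword.
 */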
1983
1984
1985/**
1986 * Loads the specified stack pointer from the 64-bit TSS.
1987 *
1988 * @returns VBox strict status code.
1989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1990 * @param uCpl The CPL to load the stack for.
1991 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1992 * @param puRsp Where to return the new stack pointer.
1993 */
1994static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
1995{
1996 Assert(uCpl < 4);
1997 Assert(uIst < 8);
1998 *puRsp = 0; /* make gcc happy */
1999
2000 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2001 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2002
2003 uint32_t off;
2004 if (uIst)
2005 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2006 else
2007 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2008 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2009 {
2010 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2011 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2012 }
2013
2014 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2015}
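
/*
 * In the 64-bit TSS the RSP0..RSP2 fields start at byte offset 4 and the
 * IST1..IST7 slots at byte offset 36, each 8 bytes wide - which is what the
 * two RT_UOFFSETOF calculations above resolve to.  E.g. an IDT entry with
 * IST=2 always reads 8 bytes at TR.base + 44, regardless of the target CPL.
 */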
2016
2017
2018/**
2019 * Adjust the CPU state according to the exception being raised.
2020 *
2021 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2022 * @param u8Vector The exception that has been raised.
2023 */
2024DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2025{
2026 switch (u8Vector)
2027 {
2028 case X86_XCPT_DB:
2029 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2030 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2031 break;
2032 /** @todo Read the AMD and Intel exception reference... */
2033 }
2034}
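
/*
 * Note: clearing DR7.GD on #DB matches the architectural behaviour - the
 * general detect flag is cleared when the debug exception is delivered so
 * that the handler can touch the debug registers without instantly
 * re-triggering #DB.
 */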
2035
2036
2037/**
2038 * Implements exceptions and interrupts for real mode.
2039 *
2040 * @returns VBox strict status code.
2041 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2042 * @param cbInstr The number of bytes to offset rIP by in the return
2043 * address.
2044 * @param u8Vector The interrupt / exception vector number.
2045 * @param fFlags The flags.
2046 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2047 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2048 */
2049static VBOXSTRICTRC
2050iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2051 uint8_t cbInstr,
2052 uint8_t u8Vector,
2053 uint32_t fFlags,
2054 uint16_t uErr,
2055 uint64_t uCr2) RT_NOEXCEPT
2056{
2057 NOREF(uErr); NOREF(uCr2);
2058 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2059
2060 /*
2061 * Read the IDT entry.
2062 */
2063 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2064 {
2065 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2066 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2067 }
2068 RTFAR16 Idte;
2069 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2070 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2071 {
2072 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2073 return rcStrict;
2074 }
2075
2076 /*
2077 * Push the stack frame.
2078 */
2079 uint16_t *pu16Frame;
2080 uint64_t uNewRsp;
2081 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
2082 if (rcStrict != VINF_SUCCESS)
2083 return rcStrict;
2084
2085 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2086#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2087 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2088 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2089 fEfl |= UINT16_C(0xf000);
2090#endif
2091 pu16Frame[2] = (uint16_t)fEfl;
2092 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2093 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2094 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
2095 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2096 return rcStrict;
2097
2098 /*
2099 * Load the vector address into cs:ip and make exception specific state
2100 * adjustments.
2101 */
2102 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2103 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2104 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2105 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2106 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2107 pVCpu->cpum.GstCtx.rip = Idte.off;
2108 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2109 IEMMISC_SET_EFL(pVCpu, fEfl);
2110
2111 /** @todo do we actually do this in real mode? */
2112 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2113 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2114
2115 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2116 so best leave them alone in case we're in a weird kind of real mode... */
2117
2118 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2119}
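
/*
 * Real mode recap for the function above: the IVT consists of 4 byte IP:CS
 * entries, so vector N lives at IDTR.base + N * 4 and the limit check above
 * requires byte N * 4 + 3 to be within IDTR.limit.  The frame pushed is the
 * classic 6 bytes - FLAGS, CS, IP - and IF, TF and AC are cleared before
 * control is transferred.  E.g. INT 21h fetches its far pointer from linear
 * address IDTR.base + 0x84.
 */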
2120
2121
2122/**
2123 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2124 *
2125 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2126 * @param pSReg Pointer to the segment register.
2127 */
2128DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2129{
2130 pSReg->Sel = 0;
2131 pSReg->ValidSel = 0;
2132 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2133 {
2134 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
2135 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2136 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2137 }
2138 else
2139 {
2140 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2141 /** @todo check this on AMD-V */
2142 pSReg->u64Base = 0;
2143 pSReg->u32Limit = 0;
2144 }
2145}
2146
2147
2148/**
2149 * Loads a segment selector during a task switch in V8086 mode.
2150 *
2151 * @param pSReg Pointer to the segment register.
2152 * @param uSel The selector value to load.
2153 */
2154DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2155{
2156 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2157 pSReg->Sel = uSel;
2158 pSReg->ValidSel = uSel;
2159 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2160 pSReg->u64Base = uSel << 4;
2161 pSReg->u32Limit = 0xffff;
2162 pSReg->Attr.u = 0xf3;
2163}
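
/*
 * The hardcoded 0xf3 above decodes to: present, DPL=3, code/data descriptor
 * (S=1), type 3 = accessed read/write data segment - i.e. the fixed
 * attributes segments always have in V8086 mode, together with
 * base = selector * 16 and a 64KB limit.
 */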
2164
2165
2166/**
2167 * Loads a segment selector during a task switch in protected mode.
2168 *
2169 * In this task switch scenario, we would throw \#TS exceptions rather than
2170 * \#GPs.
2171 *
2172 * @returns VBox strict status code.
2173 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2174 * @param pSReg Pointer to the segment register.
2175 * @param uSel The new selector value.
2176 *
2177 * @remarks This does _not_ handle CS or SS.
2178 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2179 */
2180static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2181{
2182 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2183
2184 /* Null data selector. */
2185 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2186 {
2187 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2188 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2189 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2190 return VINF_SUCCESS;
2191 }
2192
2193 /* Fetch the descriptor. */
2194 IEMSELDESC Desc;
2195 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2196 if (rcStrict != VINF_SUCCESS)
2197 {
2198 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2199 VBOXSTRICTRC_VAL(rcStrict)));
2200 return rcStrict;
2201 }
2202
2203 /* Must be a data segment or readable code segment. */
2204 if ( !Desc.Legacy.Gen.u1DescType
2205 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2206 {
2207 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2208 Desc.Legacy.Gen.u4Type));
2209 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2210 }
2211
2212 /* Check privileges for data segments and non-conforming code segments. */
2213 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2214 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2215 {
2216 /* The RPL and the new CPL must be less than or equal to the DPL. */
2217 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2218 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2219 {
2220 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2221 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2222 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2223 }
2224 }
2225
2226 /* Is it there? */
2227 if (!Desc.Legacy.Gen.u1Present)
2228 {
2229 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2230 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2231 }
2232
2233 /* The base and limit. */
2234 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2235 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2236
2237 /*
2238 * Ok, everything checked out fine. Now set the accessed bit before
2239 * committing the result into the registers.
2240 */
2241 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2242 {
2243 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2244 if (rcStrict != VINF_SUCCESS)
2245 return rcStrict;
2246 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2247 }
2248
2249 /* Commit */
2250 pSReg->Sel = uSel;
2251 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2252 pSReg->u32Limit = cbLimit;
2253 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2254 pSReg->ValidSel = uSel;
2255 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2256 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2257 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2258
2259 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2260 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2261 return VINF_SUCCESS;
2262}
2263
2264
2265/**
2266 * Performs a task switch.
2267 *
2268 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2269 * caller is responsible for performing the necessary checks (like DPL, TSS
2270 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2271 * reference for JMP, CALL, IRET.
2272 *
2273 * If the task switch is due to a software interrupt or hardware exception,
2274 * the caller is responsible for validating the TSS selector and descriptor. See
2275 * Intel Instruction reference for INT n.
2276 *
2277 * @returns VBox strict status code.
2278 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2279 * @param enmTaskSwitch The cause of the task switch.
2280 * @param uNextEip The EIP effective after the task switch.
2281 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2282 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2283 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2284 * @param SelTSS The TSS selector of the new task.
2285 * @param pNewDescTSS Pointer to the new TSS descriptor.
2286 */
2287VBOXSTRICTRC
2288iemTaskSwitch(PVMCPUCC pVCpu,
2289 IEMTASKSWITCH enmTaskSwitch,
2290 uint32_t uNextEip,
2291 uint32_t fFlags,
2292 uint16_t uErr,
2293 uint64_t uCr2,
2294 RTSEL SelTSS,
2295 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2296{
2297 Assert(!IEM_IS_REAL_MODE(pVCpu));
2298 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2299 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2300
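    /*
     * Rough order of business below: update CR2 for page faults, validate the
     * new TSS limit, give the VMX and SVM task switch intercepts a chance to
     * take over, check the current TSS limit, save the outgoing register state
     * into the current TSS (clearing its busy bit and/or EFLAGS.NT for
     * JMP/IRET), load the register state from the new TSS, mark it busy, load
     * TR, switch CR3 and LDTR, reload the segment registers, push any error
     * code onto the new stack and finally check the new EIP against the new CS
     * limit.
     */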
2301 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2302 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2303 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2304 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2305 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2306
2307 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2308 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2309
2310 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2311 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2312
2313 /* Update CR2 in case it's a page-fault. */
2314 /** @todo This should probably be done much earlier in IEM/PGM. See
2315 * @bugref{5653#c49}. */
2316 if (fFlags & IEM_XCPT_FLAGS_CR2)
2317 pVCpu->cpum.GstCtx.cr2 = uCr2;
2318
2319 /*
2320 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2321 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2322 */
2323 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2324 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2325 if (uNewTSSLimit < uNewTSSLimitMin)
2326 {
2327 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2328 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2329 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2330 }
2331
2332 /*
2333 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2334 * The new TSS must have been read and validated (DPL, limits etc.) before a
2335 * task-switch VM-exit commences.
2336 *
2337 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2338 */
2339 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2340 {
2341 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2342 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2343 }
2344
2345 /*
2346 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2347 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2348 */
2349 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2350 {
2351 uint32_t const uExitInfo1 = SelTSS;
2352 uint32_t uExitInfo2 = uErr;
2353 switch (enmTaskSwitch)
2354 {
2355 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2356 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2357 default: break;
2358 }
2359 if (fFlags & IEM_XCPT_FLAGS_ERR)
2360 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2361 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2362 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2363
2364 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2365 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2366 RT_NOREF2(uExitInfo1, uExitInfo2);
2367 }
2368
2369 /*
2370 * Check the current TSS limit. The last written byte to the current TSS during the
2371 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2372 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2373 *
2374 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2375 * end up with smaller than "legal" TSS limits.
2376 */
2377 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2378 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2379 if (uCurTSSLimit < uCurTSSLimitMin)
2380 {
2381 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2382 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2383 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2384 }
2385
2386 /*
2387 * Verify that the new TSS can be accessed and map it. Map only the required contents
2388 * and not the entire TSS.
2389 */
2390 void *pvNewTSS;
2391 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2392 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2393 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2394 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2395 * not perform correct translation if this happens. See Intel spec. 7.2.1
2396 * "Task-State Segment". */
2397 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2398 if (rcStrict != VINF_SUCCESS)
2399 {
2400 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2401 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2402 return rcStrict;
2403 }
2404
2405 /*
2406 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2407 */
2408 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2409 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2410 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2411 {
2412 PX86DESC pDescCurTSS;
2413 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2414 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2415 if (rcStrict != VINF_SUCCESS)
2416 {
2417 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2418 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2419 return rcStrict;
2420 }
2421
2422 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2423 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2424 if (rcStrict != VINF_SUCCESS)
2425 {
2426 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2427 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2428 return rcStrict;
2429 }
2430
2431 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2432 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2433 {
2434 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2435 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2436 fEFlags &= ~X86_EFL_NT;
2437 }
2438 }
2439
2440 /*
2441 * Save the CPU state into the current TSS.
2442 */
2443 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2444 if (GCPtrNewTSS == GCPtrCurTSS)
2445 {
2446 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2447 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2448 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2449 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2450 pVCpu->cpum.GstCtx.ldtr.Sel));
2451 }
2452 if (fIsNewTSS386)
2453 {
2454 /*
2455 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2456 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2457 */
2458 void *pvCurTSS32;
2459 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2460 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2461 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2462 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2463 if (rcStrict != VINF_SUCCESS)
2464 {
2465 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2466 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2467 return rcStrict;
2468 }
2469
2470 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..cbCurTSS). */
2471 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2472 pCurTSS32->eip = uNextEip;
2473 pCurTSS32->eflags = fEFlags;
2474 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2475 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2476 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2477 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2478 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2479 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2480 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2481 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2482 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2483 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2484 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2485 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2486 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2487 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2488
2489 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2490 if (rcStrict != VINF_SUCCESS)
2491 {
2492 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2493 VBOXSTRICTRC_VAL(rcStrict)));
2494 return rcStrict;
2495 }
2496 }
2497 else
2498 {
2499 /*
2500 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2501 */
2502 void *pvCurTSS16;
2503 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2504 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2505 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2506 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2507 if (rcStrict != VINF_SUCCESS)
2508 {
2509 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2510 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2511 return rcStrict;
2512 }
2513
2514 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..cbCurTSS). */
2515 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2516 pCurTSS16->ip = uNextEip;
2517 pCurTSS16->flags = (uint16_t)fEFlags;
2518 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2519 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2520 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2521 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2522 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2523 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2524 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2525 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2526 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2527 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2528 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2529 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2530
2531 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2532 if (rcStrict != VINF_SUCCESS)
2533 {
2534 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2535 VBOXSTRICTRC_VAL(rcStrict)));
2536 return rcStrict;
2537 }
2538 }
2539
2540 /*
2541 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2542 */
2543 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2544 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2545 {
2546 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2547 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2548 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2549 }
2550
2551 /*
2552 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2553 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2554 */
2555 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2556 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2557 bool fNewDebugTrap;
2558 if (fIsNewTSS386)
2559 {
2560 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2561 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2562 uNewEip = pNewTSS32->eip;
2563 uNewEflags = pNewTSS32->eflags;
2564 uNewEax = pNewTSS32->eax;
2565 uNewEcx = pNewTSS32->ecx;
2566 uNewEdx = pNewTSS32->edx;
2567 uNewEbx = pNewTSS32->ebx;
2568 uNewEsp = pNewTSS32->esp;
2569 uNewEbp = pNewTSS32->ebp;
2570 uNewEsi = pNewTSS32->esi;
2571 uNewEdi = pNewTSS32->edi;
2572 uNewES = pNewTSS32->es;
2573 uNewCS = pNewTSS32->cs;
2574 uNewSS = pNewTSS32->ss;
2575 uNewDS = pNewTSS32->ds;
2576 uNewFS = pNewTSS32->fs;
2577 uNewGS = pNewTSS32->gs;
2578 uNewLdt = pNewTSS32->selLdt;
2579 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2580 }
2581 else
2582 {
2583 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2584 uNewCr3 = 0;
2585 uNewEip = pNewTSS16->ip;
2586 uNewEflags = pNewTSS16->flags;
2587 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2588 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2589 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2590 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2591 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2592 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2593 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2594 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2595 uNewES = pNewTSS16->es;
2596 uNewCS = pNewTSS16->cs;
2597 uNewSS = pNewTSS16->ss;
2598 uNewDS = pNewTSS16->ds;
2599 uNewFS = 0;
2600 uNewGS = 0;
2601 uNewLdt = pNewTSS16->selLdt;
2602 fNewDebugTrap = false;
2603 }
2604
2605 if (GCPtrNewTSS == GCPtrCurTSS)
2606 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2607 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2608
2609 /*
2610 * We're done accessing the new TSS.
2611 */
2612 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2613 if (rcStrict != VINF_SUCCESS)
2614 {
2615 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2616 return rcStrict;
2617 }
2618
2619 /*
2620 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2621 */
2622 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2623 {
2624 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2625 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2626 if (rcStrict != VINF_SUCCESS)
2627 {
2628 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2629 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2630 return rcStrict;
2631 }
2632
2633 /* Check that the descriptor indicates the new TSS is available (not busy). */
2634 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2635 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2636 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2637
2638 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2639 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2640 if (rcStrict != VINF_SUCCESS)
2641 {
2642 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2643 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2644 return rcStrict;
2645 }
2646 }
2647
2648 /*
2649 * From this point on, we're technically in the new task. Exceptions raised from here on
2650 * are delivered after the task switch has completed but before any instruction of the new task executes.
2651 */
2652 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2653 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2654 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2655 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2656 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2657 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2658 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2659
2660 /* Set the busy bit in TR. */
2661 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2662
2663 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2664 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2665 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2666 {
2667 uNewEflags |= X86_EFL_NT;
2668 }
2669
2670 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2671 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2672 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2673
2674 pVCpu->cpum.GstCtx.eip = uNewEip;
2675 pVCpu->cpum.GstCtx.eax = uNewEax;
2676 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2677 pVCpu->cpum.GstCtx.edx = uNewEdx;
2678 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2679 pVCpu->cpum.GstCtx.esp = uNewEsp;
2680 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2681 pVCpu->cpum.GstCtx.esi = uNewEsi;
2682 pVCpu->cpum.GstCtx.edi = uNewEdi;
2683
2684 uNewEflags &= X86_EFL_LIVE_MASK;
2685 uNewEflags |= X86_EFL_RA1_MASK;
2686 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2687
2688 /*
2689 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2690 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2691 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2692 */
2693 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2694 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2695
2696 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2697 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2698
2699 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2700 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2701
2702 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2703 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2704
2705 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2706 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2707
2708 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2709 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2710 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2711
2712 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2713 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2714 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2715 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2716
2717 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2718 {
2719 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2720 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2721 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2722 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2723 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2724 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2725 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2726 }
2727
2728 /*
2729 * Switch CR3 for the new task.
2730 */
2731 if ( fIsNewTSS386
2732 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2733 {
2734 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2735 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2736 AssertRCSuccessReturn(rc, rc);
2737
2738 /* Inform PGM. */
2739 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2740 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2741 AssertRCReturn(rc, rc);
2742 /* ignore informational status codes */
2743
2744 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2745 }
2746
2747 /*
2748 * Switch LDTR for the new task.
2749 */
2750 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2751 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2752 else
2753 {
2754 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2755
2756 IEMSELDESC DescNewLdt;
2757 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2758 if (rcStrict != VINF_SUCCESS)
2759 {
2760 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2761 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2762 return rcStrict;
2763 }
2764 if ( !DescNewLdt.Legacy.Gen.u1Present
2765 || DescNewLdt.Legacy.Gen.u1DescType
2766 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2767 {
2768 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2769 uNewLdt, DescNewLdt.Legacy.u));
2770 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2771 }
2772
2773 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2774 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2775 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2776 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2777 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2778 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2779 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2780 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2781 }
2782
2783 IEMSELDESC DescSS;
2784 if (IEM_IS_V86_MODE(pVCpu))
2785 {
2786 IEM_SET_CPL(pVCpu, 3);
2787 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2788 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2789 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2790 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2791 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2792 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2793
2794 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2795 DescSS.Legacy.u = 0;
2796 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2797 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2798 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2799 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2800 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2801 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2802 DescSS.Legacy.Gen.u2Dpl = 3;
2803 }
2804 else
2805 {
2806 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2807
2808 /*
2809 * Load the stack segment for the new task.
2810 */
2811 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2812 {
2813 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2814 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2815 }
2816
2817 /* Fetch the descriptor. */
2818 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2819 if (rcStrict != VINF_SUCCESS)
2820 {
2821 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2822 VBOXSTRICTRC_VAL(rcStrict)));
2823 return rcStrict;
2824 }
2825
2826 /* SS must be a data segment and writable. */
2827 if ( !DescSS.Legacy.Gen.u1DescType
2828 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2829 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2830 {
2831 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2832 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2833 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2834 }
2835
2836 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2837 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2838 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2839 {
2840 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2841 uNewCpl));
2842 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2843 }
2844
2845 /* Is it there? */
2846 if (!DescSS.Legacy.Gen.u1Present)
2847 {
2848 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2849 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2850 }
2851
2852 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2853 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2854
2855 /* Set the accessed bit before committing the result into SS. */
2856 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2857 {
2858 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2859 if (rcStrict != VINF_SUCCESS)
2860 return rcStrict;
2861 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2862 }
2863
2864 /* Commit SS. */
2865 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2866 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2867 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2868 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2869 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2870 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2871 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2872
2873 /* CPL has changed, update IEM before loading rest of segments. */
2874 IEM_SET_CPL(pVCpu, uNewCpl);
2875
2876 /*
2877 * Load the data segments for the new task.
2878 */
2879 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2880 if (rcStrict != VINF_SUCCESS)
2881 return rcStrict;
2882 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2883 if (rcStrict != VINF_SUCCESS)
2884 return rcStrict;
2885 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2886 if (rcStrict != VINF_SUCCESS)
2887 return rcStrict;
2888 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2889 if (rcStrict != VINF_SUCCESS)
2890 return rcStrict;
2891
2892 /*
2893 * Load the code segment for the new task.
2894 */
2895 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2896 {
2897 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2898 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2899 }
2900
2901 /* Fetch the descriptor. */
2902 IEMSELDESC DescCS;
2903 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2904 if (rcStrict != VINF_SUCCESS)
2905 {
2906 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2907 return rcStrict;
2908 }
2909
2910 /* CS must be a code segment. */
2911 if ( !DescCS.Legacy.Gen.u1DescType
2912 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2913 {
2914 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2915 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2916 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2917 }
2918
2919 /* For conforming CS, DPL must be less than or equal to the RPL. */
2920 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2921 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2922 {
2923 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2924 DescCS.Legacy.Gen.u2Dpl));
2925 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2926 }
2927
2928 /* For non-conforming CS, DPL must match RPL. */
2929 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2930 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2931 {
2932 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2933 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2934 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2935 }
2936
2937 /* Is it there? */
2938 if (!DescCS.Legacy.Gen.u1Present)
2939 {
2940 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2941 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2942 }
2943
2944 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2945 u64Base = X86DESC_BASE(&DescCS.Legacy);
2946
2947 /* Set the accessed bit before committing the result into CS. */
2948 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2949 {
2950 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2951 if (rcStrict != VINF_SUCCESS)
2952 return rcStrict;
2953 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2954 }
2955
2956 /* Commit CS. */
2957 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2958 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2959 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2960 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2961 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2962 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2963 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2964 }
2965
2966 /* Make sure the CPU mode is correct. */
2967 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
2968 if (fExecNew != pVCpu->iem.s.fExec)
2969 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
2970 pVCpu->iem.s.fExec = fExecNew;
2971
2972 /** @todo Debug trap. */
2973 if (fIsNewTSS386 && fNewDebugTrap)
2974 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2975
2976 /*
2977 * Construct the error code masks based on what caused this task switch.
2978 * See Intel Instruction reference for INT.
2979 */
2980 uint16_t uExt;
2981 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2982 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2983 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2984 uExt = 1;
2985 else
2986 uExt = 0;
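    /* uExt becomes bit 0 (the "external event" bit) of the error codes used
       below, indicating that the fault was caused by an event external to the
       program rather than by the instruction stream itself. */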
2987
2988 /*
2989 * Push any error code on to the new stack.
2990 */
2991 if (fFlags & IEM_XCPT_FLAGS_ERR)
2992 {
2993 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
2994 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2995 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
2996
2997 /* Check that there is sufficient space on the stack. */
2998 /** @todo Factor out segment limit checking for normal/expand down segments
2999 * into a separate function. */
3000 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3001 {
3002 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3003 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3004 {
3005 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3006 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3007 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3008 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3009 }
3010 }
3011 else
3012 {
3013 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3014 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3015 {
3016 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3017 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3018 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3019 }
3020 }
3021
3022
3023 if (fIsNewTSS386)
3024 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3025 else
3026 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3027 if (rcStrict != VINF_SUCCESS)
3028 {
3029 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3030 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3031 return rcStrict;
3032 }
3033 }
3034
3035 /* Check the new EIP against the new CS limit. */
3036 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3037 {
3038         Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3039 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3040 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3041 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3042 }
3043
3044 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3045 pVCpu->cpum.GstCtx.ss.Sel));
3046 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3047}
3048
3049
3050/**
3051 * Implements exceptions and interrupts for protected mode.
3052 *
3053 * @returns VBox strict status code.
3054 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3055 * @param cbInstr The number of bytes to offset rIP by in the return
3056 * address.
3057 * @param u8Vector The interrupt / exception vector number.
3058 * @param fFlags The flags.
3059 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3060 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3061 */
3062static VBOXSTRICTRC
3063iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3064 uint8_t cbInstr,
3065 uint8_t u8Vector,
3066 uint32_t fFlags,
3067 uint16_t uErr,
3068 uint64_t uCr2) RT_NOEXCEPT
3069{
3070 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3071
3072 /*
3073 * Read the IDT entry.
3074 */
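         /* Protected-mode IDT entries are 8 bytes each; the limit must cover the whole entry. */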
3075 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3076 {
3077 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3078 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3079 }
3080 X86DESC Idte;
3081 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3082 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3083 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3084 {
3085 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3086 return rcStrict;
3087 }
3088 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3089 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3090 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3091 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3092
3093 /*
3094 * Check the descriptor type, DPL and such.
3095 * ASSUMES this is done in the same order as described for call-gate calls.
3096 */
3097 if (Idte.Gate.u1DescType)
3098 {
3099 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3100 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3101 }
3102 bool fTaskGate = false;
3103 uint8_t f32BitGate = true;
3104 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3105 switch (Idte.Gate.u4Type)
3106 {
3107 case X86_SEL_TYPE_SYS_UNDEFINED:
3108 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3109 case X86_SEL_TYPE_SYS_LDT:
3110 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3111 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3112 case X86_SEL_TYPE_SYS_UNDEFINED2:
3113 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3114 case X86_SEL_TYPE_SYS_UNDEFINED3:
3115 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3116 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3117 case X86_SEL_TYPE_SYS_UNDEFINED4:
3118 {
3119 /** @todo check what actually happens when the type is wrong...
3120 * esp. call gates. */
3121 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3122 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3123 }
3124
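             /* Interrupt gates also clear EFLAGS.IF on delivery; trap gates (below) leave it set. */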
3125 case X86_SEL_TYPE_SYS_286_INT_GATE:
3126 f32BitGate = false;
3127 RT_FALL_THRU();
3128 case X86_SEL_TYPE_SYS_386_INT_GATE:
3129 fEflToClear |= X86_EFL_IF;
3130 break;
3131
3132 case X86_SEL_TYPE_SYS_TASK_GATE:
3133 fTaskGate = true;
3134#ifndef IEM_IMPLEMENTS_TASKSWITCH
3135 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3136#endif
3137 break;
3138
3139 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3140             f32BitGate = false;
                 RT_FALL_THRU();
3141 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3142 break;
3143
3144 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3145 }
3146
3147 /* Check DPL against CPL if applicable. */
3148 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3149 {
3150 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3151 {
3152 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3153 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3154 }
3155 }
3156
3157 /* Is it there? */
3158 if (!Idte.Gate.u1Present)
3159 {
3160 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3161 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3162 }
3163
3164 /* Is it a task-gate? */
3165 if (fTaskGate)
3166 {
3167 /*
3168 * Construct the error code masks based on what caused this task switch.
3169 * See Intel Instruction reference for INT.
3170 */
3171 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3172 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3173 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3174 RTSEL SelTSS = Idte.Gate.u16Sel;
3175
3176 /*
3177 * Fetch the TSS descriptor in the GDT.
3178 */
3179 IEMSELDESC DescTSS;
3180 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3181 if (rcStrict != VINF_SUCCESS)
3182 {
3183 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3184 VBOXSTRICTRC_VAL(rcStrict)));
3185 return rcStrict;
3186 }
3187
3188 /* The TSS descriptor must be a system segment and be available (not busy). */
3189 if ( DescTSS.Legacy.Gen.u1DescType
3190 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3191 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3192 {
3193 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3194 u8Vector, SelTSS, DescTSS.Legacy.au64));
3195 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3196 }
3197
3198 /* The TSS must be present. */
3199 if (!DescTSS.Legacy.Gen.u1Present)
3200 {
3201 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3202 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3203 }
3204
3205 /* Do the actual task switch. */
3206 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3207 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3208 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3209 }
3210
3211 /* A null CS is bad. */
3212 RTSEL NewCS = Idte.Gate.u16Sel;
3213 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3214 {
3215 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3216 return iemRaiseGeneralProtectionFault0(pVCpu);
3217 }
3218
3219 /* Fetch the descriptor for the new CS. */
3220 IEMSELDESC DescCS;
3221 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3222 if (rcStrict != VINF_SUCCESS)
3223 {
3224 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3225 return rcStrict;
3226 }
3227
3228 /* Must be a code segment. */
3229 if (!DescCS.Legacy.Gen.u1DescType)
3230 {
3231 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3232 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3233 }
3234 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3235 {
3236 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3237 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3238 }
3239
3240 /* Don't allow lowering the privilege level. */
3241 /** @todo Does the lowering of privileges apply to software interrupts
3242      *        only? This has a bearing on the more-privileged or
3243 * same-privilege stack behavior further down. A testcase would
3244 * be nice. */
3245 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3246 {
3247 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3248 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3249 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3250 }
3251
3252 /* Make sure the selector is present. */
3253 if (!DescCS.Legacy.Gen.u1Present)
3254 {
3255 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3256 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3257 }
3258
3259#ifdef LOG_ENABLED
3260     /* If software interrupt, try to decode it if logging is enabled and such. */
3261 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3262 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3263 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3264#endif
3265
3266 /* Check the new EIP against the new CS limit. */
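         /* 286 gates only carry a 16-bit offset; 386 gates supply the full 32-bit offset. */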
3267 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3268 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3269 ? Idte.Gate.u16OffsetLow
3270 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3271 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3272 if (uNewEip > cbLimitCS)
3273 {
3274 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3275 u8Vector, uNewEip, cbLimitCS, NewCS));
3276 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3277 }
3278 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3279
3280 /* Calc the flag image to push. */
3281 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3282 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3283 fEfl &= ~X86_EFL_RF;
3284 else
3285 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3286
3287 /* From V8086 mode only go to CPL 0. */
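         /* A conforming handler CS runs at the caller's CPL; a non-conforming one runs at its own DPL. */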
3288 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3289 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3290 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3291 {
3292 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3293 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3294 }
3295
3296 /*
3297 * If the privilege level changes, we need to get a new stack from the TSS.
3298      * This in turn means validating the new SS and ESP...
3299 */
3300 if (uNewCpl != IEM_GET_CPL(pVCpu))
3301 {
3302 RTSEL NewSS;
3303 uint32_t uNewEsp;
3304 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3305 if (rcStrict != VINF_SUCCESS)
3306 return rcStrict;
3307
3308 IEMSELDESC DescSS;
3309 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3310 if (rcStrict != VINF_SUCCESS)
3311 return rcStrict;
3312 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3313 if (!DescSS.Legacy.Gen.u1DefBig)
3314 {
3315 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3316 uNewEsp = (uint16_t)uNewEsp;
3317 }
3318
3319 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3320
3321 /* Check that there is sufficient space for the stack frame. */
3322 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
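             /* The frame holds SS:ESP, EFLAGS, CS:EIP and optionally the error code (5 or 6 entries);
                when coming from V8086 mode, GS, FS, DS and ES are pushed as well (9 or 10 entries).
                Each entry is 2 bytes for a 16-bit gate and 4 bytes for a 32-bit gate. */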
3323 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3324 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3325 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3326
3327 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3328 {
3329 if ( uNewEsp - 1 > cbLimitSS
3330 || uNewEsp < cbStackFrame)
3331 {
3332 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3333 u8Vector, NewSS, uNewEsp, cbStackFrame));
3334 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3335 }
3336 }
3337 else
3338 {
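                 /* Expand-down segment: valid offsets lie above the limit, up to 64 KiB or 4 GiB
                    depending on the D bit. */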
3339 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3340 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3341 {
3342 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3343 u8Vector, NewSS, uNewEsp, cbStackFrame));
3344 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3345 }
3346 }
3347
3348 /*
3349 * Start making changes.
3350 */
3351
3352 /* Set the new CPL so that stack accesses use it. */
3353 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3354 IEM_SET_CPL(pVCpu, uNewCpl);
3355
3356 /* Create the stack frame. */
3357 RTPTRUNION uStackFrame;
3358 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3359 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3360 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3361 if (rcStrict != VINF_SUCCESS)
3362 return rcStrict;
3363 void * const pvStackFrame = uStackFrame.pv;
3364 if (f32BitGate)
3365 {
3366 if (fFlags & IEM_XCPT_FLAGS_ERR)
3367 *uStackFrame.pu32++ = uErr;
3368 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3369 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3370 uStackFrame.pu32[2] = fEfl;
3371 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3372 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3373 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3374 if (fEfl & X86_EFL_VM)
3375 {
3376 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3377 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3378 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3379 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3380 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3381 }
3382 }
3383 else
3384 {
3385 if (fFlags & IEM_XCPT_FLAGS_ERR)
3386 *uStackFrame.pu16++ = uErr;
3387 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3388 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3389 uStackFrame.pu16[2] = fEfl;
3390 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3391 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3392 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3393 if (fEfl & X86_EFL_VM)
3394 {
3395 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3396 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3397 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3398 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3399 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3400 }
3401 }
3402 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3403 if (rcStrict != VINF_SUCCESS)
3404 return rcStrict;
3405
3406 /* Mark the selectors 'accessed' (hope this is the correct time). */
3407         /** @todo testcase: exactly _when_ are the accessed bits set - before or
3408 * after pushing the stack frame? (Write protect the gdt + stack to
3409 * find out.) */
3410 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3411 {
3412 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3413 if (rcStrict != VINF_SUCCESS)
3414 return rcStrict;
3415 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3416 }
3417
3418 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3419 {
3420 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3421 if (rcStrict != VINF_SUCCESS)
3422 return rcStrict;
3423 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3424 }
3425
3426 /*
3427          * Start committing the register changes (joins with the DPL=CPL branch).
3428 */
3429 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3430 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3431 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3432 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3433 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3434 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3435 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3436 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3437 * SP is loaded).
3438 * Need to check the other combinations too:
3439 * - 16-bit TSS, 32-bit handler
3440 * - 32-bit TSS, 16-bit handler */
3441 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3442 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3443 else
3444 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3445
3446 if (fEfl & X86_EFL_VM)
3447 {
3448 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3449 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3450 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3451 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3452 }
3453 }
3454 /*
3455 * Same privilege, no stack change and smaller stack frame.
3456 */
3457 else
3458 {
3459 uint64_t uNewRsp;
3460 RTPTRUNION uStackFrame;
3461 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3462 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3463 if (rcStrict != VINF_SUCCESS)
3464 return rcStrict;
3465 void * const pvStackFrame = uStackFrame.pv;
3466
3467 if (f32BitGate)
3468 {
3469 if (fFlags & IEM_XCPT_FLAGS_ERR)
3470 *uStackFrame.pu32++ = uErr;
3471 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3472 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3473 uStackFrame.pu32[2] = fEfl;
3474 }
3475 else
3476 {
3477 if (fFlags & IEM_XCPT_FLAGS_ERR)
3478 *uStackFrame.pu16++ = uErr;
3479 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3480 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3481 uStackFrame.pu16[2] = fEfl;
3482 }
3483 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3484 if (rcStrict != VINF_SUCCESS)
3485 return rcStrict;
3486
3487 /* Mark the CS selector as 'accessed'. */
3488 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3489 {
3490 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3491 if (rcStrict != VINF_SUCCESS)
3492 return rcStrict;
3493 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3494 }
3495
3496 /*
3497 * Start committing the register changes (joins with the other branch).
3498 */
3499 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3500 }
3501
3502 /* ... register committing continues. */
3503 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3504 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3505 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3506 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3507 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3508 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3509
3510 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3511 fEfl &= ~fEflToClear;
3512 IEMMISC_SET_EFL(pVCpu, fEfl);
3513
3514 if (fFlags & IEM_XCPT_FLAGS_CR2)
3515 pVCpu->cpum.GstCtx.cr2 = uCr2;
3516
3517 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3518 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3519
3520 /* Make sure the execution flags are correct. */
3521 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3522 if (fExecNew != pVCpu->iem.s.fExec)
3523 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3524 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3525 pVCpu->iem.s.fExec = fExecNew;
3526 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3527
3528 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3529}
3530
3531
3532/**
3533 * Implements exceptions and interrupts for long mode.
3534 *
3535 * @returns VBox strict status code.
3536 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3537 * @param cbInstr The number of bytes to offset rIP by in the return
3538 * address.
3539 * @param u8Vector The interrupt / exception vector number.
3540 * @param fFlags The flags.
3541 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3542 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3543 */
3544static VBOXSTRICTRC
3545iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3546 uint8_t cbInstr,
3547 uint8_t u8Vector,
3548 uint32_t fFlags,
3549 uint16_t uErr,
3550 uint64_t uCr2) RT_NOEXCEPT
3551{
3552 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3553
3554 /*
3555 * Read the IDT entry.
3556 */
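         /* Long-mode IDT entries are 16 bytes each, hence the shift by 4 and the two 8-byte fetches below. */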
3557 uint16_t offIdt = (uint16_t)u8Vector << 4;
3558 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3559 {
3560 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3561 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3562 }
3563 X86DESC64 Idte;
3564#ifdef _MSC_VER /* Shut up silly compiler warning. */
3565 Idte.au64[0] = 0;
3566 Idte.au64[1] = 0;
3567#endif
3568 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3569 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3570 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3571 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3572 {
3573 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3574 return rcStrict;
3575 }
3576 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3577 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3578 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3579
3580 /*
3581 * Check the descriptor type, DPL and such.
3582 * ASSUMES this is done in the same order as described for call-gate calls.
3583 */
3584 if (Idte.Gate.u1DescType)
3585 {
3586 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3587 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3588 }
3589 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3590 switch (Idte.Gate.u4Type)
3591 {
3592 case AMD64_SEL_TYPE_SYS_INT_GATE:
3593 fEflToClear |= X86_EFL_IF;
3594 break;
3595 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3596 break;
3597
3598 default:
3599 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3600 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3601 }
3602
3603 /* Check DPL against CPL if applicable. */
3604 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3605 {
3606 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3607 {
3608 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3609 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3610 }
3611 }
3612
3613 /* Is it there? */
3614 if (!Idte.Gate.u1Present)
3615 {
3616 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3617 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3618 }
3619
3620 /* A null CS is bad. */
3621 RTSEL NewCS = Idte.Gate.u16Sel;
3622 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3623 {
3624 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3625 return iemRaiseGeneralProtectionFault0(pVCpu);
3626 }
3627
3628 /* Fetch the descriptor for the new CS. */
3629 IEMSELDESC DescCS;
3630 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3631 if (rcStrict != VINF_SUCCESS)
3632 {
3633 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3634 return rcStrict;
3635 }
3636
3637 /* Must be a 64-bit code segment. */
3638 if (!DescCS.Long.Gen.u1DescType)
3639 {
3640 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3641 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3642 }
3643 if ( !DescCS.Long.Gen.u1Long
3644 || DescCS.Long.Gen.u1DefBig
3645 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3646 {
3647 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3648 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3649 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3650 }
3651
3652 /* Don't allow lowering the privilege level. For non-conforming CS
3653 selectors, the CS.DPL sets the privilege level the trap/interrupt
3654 handler runs at. For conforming CS selectors, the CPL remains
3655 unchanged, but the CS.DPL must be <= CPL. */
3656 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3657 * when CPU in Ring-0. Result \#GP? */
3658 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3659 {
3660 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3661 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3662 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3663 }
3664
3665
3666 /* Make sure the selector is present. */
3667 if (!DescCS.Legacy.Gen.u1Present)
3668 {
3669 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3670 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3671 }
3672
3673 /* Check that the new RIP is canonical. */
3674 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3675 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3676 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3677 if (!IEM_IS_CANONICAL(uNewRip))
3678 {
3679 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3680 return iemRaiseGeneralProtectionFault0(pVCpu);
3681 }
3682
3683 /*
3684 * If the privilege level changes or if the IST isn't zero, we need to get
3685 * a new stack from the TSS.
3686 */
3687 uint64_t uNewRsp;
3688 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3689 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3690 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3691 || Idte.Gate.u3IST != 0)
3692 {
3693 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3694 if (rcStrict != VINF_SUCCESS)
3695 return rcStrict;
3696 }
3697 else
3698 uNewRsp = pVCpu->cpum.GstCtx.rsp;
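         /* In 64-bit mode the CPU aligns the new RSP down to a 16-byte boundary before pushing the frame. */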
3699 uNewRsp &= ~(uint64_t)0xf;
3700
3701 /*
3702 * Calc the flag image to push.
3703 */
3704 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3705 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3706 fEfl &= ~X86_EFL_RF;
3707 else
3708 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3709
3710 /*
3711 * Start making changes.
3712 */
3713 /* Set the new CPL so that stack accesses use it. */
3714 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3715 IEM_SET_CPL(pVCpu, uNewCpl);
3716/** @todo Setting CPL this early seems wrong as it would affect any errors we
3717 * raise accessing the stack and (?) GDT/LDT... */
3718
3719 /* Create the stack frame. */
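         /* Always five qwords - SS, RSP, RFLAGS, CS and RIP - plus a sixth when an error code is pushed. */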
3720 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3721 RTPTRUNION uStackFrame;
3722 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3723 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3724 if (rcStrict != VINF_SUCCESS)
3725 return rcStrict;
3726 void * const pvStackFrame = uStackFrame.pv;
3727
3728 if (fFlags & IEM_XCPT_FLAGS_ERR)
3729 *uStackFrame.pu64++ = uErr;
3730 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3731 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3732 uStackFrame.pu64[2] = fEfl;
3733 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3734 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3735 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3736 if (rcStrict != VINF_SUCCESS)
3737 return rcStrict;
3738
3739     /* Mark the CS selector 'accessed' (hope this is the correct time). */
3740     /** @todo testcase: exactly _when_ are the accessed bits set - before or
3741 * after pushing the stack frame? (Write protect the gdt + stack to
3742 * find out.) */
3743 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3744 {
3745 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3746 if (rcStrict != VINF_SUCCESS)
3747 return rcStrict;
3748 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3749 }
3750
3751 /*
3752      * Start committing the register changes.
3753 */
3754     /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3755 * hidden registers when interrupting 32-bit or 16-bit code! */
3756 if (uNewCpl != uOldCpl)
3757 {
3758 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3759 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3760 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3761 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3762 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3763 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3764 }
3765 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3766 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3767 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3768 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3769 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3770 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3771 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3772 pVCpu->cpum.GstCtx.rip = uNewRip;
3773
3774 fEfl &= ~fEflToClear;
3775 IEMMISC_SET_EFL(pVCpu, fEfl);
3776
3777 if (fFlags & IEM_XCPT_FLAGS_CR2)
3778 pVCpu->cpum.GstCtx.cr2 = uCr2;
3779
3780 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3781 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3782
3783 iemRecalcExecModeAndCplFlags(pVCpu);
3784
3785 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3786}
3787
3788
3789/**
3790 * Implements exceptions and interrupts.
3791 *
3792 * All exceptions and interrupts go through this function!
3793 *
3794 * @returns VBox strict status code.
3795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3796 * @param cbInstr The number of bytes to offset rIP by in the return
3797 * address.
3798 * @param u8Vector The interrupt / exception vector number.
3799 * @param fFlags The flags.
3800 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3801 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3802 */
3803VBOXSTRICTRC
3804iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3805 uint8_t cbInstr,
3806 uint8_t u8Vector,
3807 uint32_t fFlags,
3808 uint16_t uErr,
3809 uint64_t uCr2) RT_NOEXCEPT
3810{
3811 /*
3812 * Get all the state that we might need here.
3813 */
3814 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3815 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3816
3817#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3818 /*
3819 * Flush prefetch buffer
3820 */
3821 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3822#endif
3823
3824 /*
3825 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3826 */
3827 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3828 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3829 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3830 | IEM_XCPT_FLAGS_BP_INSTR
3831 | IEM_XCPT_FLAGS_ICEBP_INSTR
3832 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3833 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3834 {
3835 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3836 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3837 u8Vector = X86_XCPT_GP;
3838 uErr = 0;
3839 }
3840#ifdef DBGFTRACE_ENABLED
3841 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3842 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3843 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3844#endif
3845
3846 /*
3847 * Evaluate whether NMI blocking should be in effect.
3848 * Normally, NMI blocking is in effect whenever we inject an NMI.
3849 */
3850 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3851 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
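         /* (Hardware keeps further NMIs blocked until the handler executes IRET.) */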
3852
3853#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3854 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3855 {
3856 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3857 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3858 return rcStrict0;
3859
3860 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3861 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3862 {
3863 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3864 fBlockNmi = false;
3865 }
3866 }
3867#endif
3868
3869#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3870 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3871 {
3872 /*
3873 * If the event is being injected as part of VMRUN, it isn't subject to event
3874 * intercepts in the nested-guest. However, secondary exceptions that occur
3875 * during injection of any event -are- subject to exception intercepts.
3876 *
3877 * See AMD spec. 15.20 "Event Injection".
3878 */
3879 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3880 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3881 else
3882 {
3883 /*
3884 * Check and handle if the event being raised is intercepted.
3885 */
3886 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3887 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3888 return rcStrict0;
3889 }
3890 }
3891#endif
3892
3893 /*
3894 * Set NMI blocking if necessary.
3895 */
3896 if (fBlockNmi)
3897 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3898
3899 /*
3900 * Do recursion accounting.
3901 */
3902 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3903 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3904 if (pVCpu->iem.s.cXcptRecursions == 0)
3905 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3906 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3907 else
3908 {
3909 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3910 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3911 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3912
3913 if (pVCpu->iem.s.cXcptRecursions >= 4)
3914 {
3915#ifdef DEBUG_bird
3916 AssertFailed();
3917#endif
3918 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3919 }
3920
3921 /*
3922 * Evaluate the sequence of recurring events.
3923 */
3924 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3925 NULL /* pXcptRaiseInfo */);
3926 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3927 { /* likely */ }
3928 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3929 {
3930 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3931 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3932 u8Vector = X86_XCPT_DF;
3933 uErr = 0;
3934#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3935 /* VMX nested-guest #DF intercept needs to be checked here. */
3936 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3937 {
3938 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3939 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3940 return rcStrict0;
3941 }
3942#endif
3943 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3944 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3945 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3946 }
3947 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3948 {
3949 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3950 return iemInitiateCpuShutdown(pVCpu);
3951 }
3952 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3953 {
3954 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3955 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3956 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3957 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3958 return VERR_EM_GUEST_CPU_HANG;
3959 }
3960 else
3961 {
3962 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3963 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3964 return VERR_IEM_IPE_9;
3965 }
3966
3967 /*
3968          * The 'EXT' bit is set when an exception occurs during delivery of an external
3969          * event (such as an interrupt or an earlier exception)[1]. The privileged software
3970          * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
3971          * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set.
3972 *
3973 * [1] - Intel spec. 6.13 "Error Code"
3974 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3975 * [3] - Intel Instruction reference for INT n.
3976 */
3977 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3978 && (fFlags & IEM_XCPT_FLAGS_ERR)
3979 && u8Vector != X86_XCPT_PF
3980 && u8Vector != X86_XCPT_DF)
3981 {
3982 uErr |= X86_TRAP_ERR_EXTERNAL;
3983 }
3984 }
3985
3986 pVCpu->iem.s.cXcptRecursions++;
3987 pVCpu->iem.s.uCurXcpt = u8Vector;
3988 pVCpu->iem.s.fCurXcpt = fFlags;
3989 pVCpu->iem.s.uCurXcptErr = uErr;
3990 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3991
3992 /*
3993 * Extensive logging.
3994 */
3995#if defined(LOG_ENABLED) && defined(IN_RING3)
3996 if (LogIs3Enabled())
3997 {
3998 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
3999 PVM pVM = pVCpu->CTX_SUFF(pVM);
4000 char szRegs[4096];
4001 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4002 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4003 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4004 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4005 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4006 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4007 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4008 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4009 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4010 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4011 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4012 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4013 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4014 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4015 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4016 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4017 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4018 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4019 " efer=%016VR{efer}\n"
4020 " pat=%016VR{pat}\n"
4021 " sf_mask=%016VR{sf_mask}\n"
4022 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4023 " lstar=%016VR{lstar}\n"
4024 " star=%016VR{star} cstar=%016VR{cstar}\n"
4025 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4026 );
4027
4028 char szInstr[256];
4029 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4030 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4031 szInstr, sizeof(szInstr), NULL);
4032 Log3(("%s%s\n", szRegs, szInstr));
4033 }
4034#endif /* LOG_ENABLED */
4035
4036 /*
4037 * Stats.
4038 */
4039 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4040 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4041 else if (u8Vector <= X86_XCPT_LAST)
4042 {
4043 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4044 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4045 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
4046 }
4047
4048 /*
4049      * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4050 * to ensure that a stale TLB or paging cache entry will only cause one
4051 * spurious #PF.
4052 */
4053 if ( u8Vector == X86_XCPT_PF
4054 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4055 IEMTlbInvalidatePage(pVCpu, uCr2);
4056
4057 /*
4058 * Call the mode specific worker function.
4059 */
4060 VBOXSTRICTRC rcStrict;
4061 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4062 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4063 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4064 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4065 else
4066 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4067
4068 /* Flush the prefetch buffer. */
4069#ifdef IEM_WITH_CODE_TLB
4070 pVCpu->iem.s.pbInstrBuf = NULL;
4071#else
4072 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4073#endif
4074
4075 /*
4076 * Unwind.
4077 */
4078 pVCpu->iem.s.cXcptRecursions--;
4079 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4080 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4081 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4082 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4083 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4084 return rcStrict;
4085}
4086
4087#ifdef IEM_WITH_SETJMP
4088/**
4089 * See iemRaiseXcptOrInt. Will not return.
4090 */
4091DECL_NO_RETURN(void)
4092iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4093 uint8_t cbInstr,
4094 uint8_t u8Vector,
4095 uint32_t fFlags,
4096 uint16_t uErr,
4097 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4098{
4099 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4100 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4101}
4102#endif
4103
4104
4105/** \#DE - 00. */
4106VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4107{
4108 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4109}
4110
4111
4112/** \#DB - 01.
4113 * @note This automatically clears DR7.GD. */
4114VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4115{
4116 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4117 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4118 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4119}
4120
4121
4122/** \#BR - 05. */
4123VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4124{
4125 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4126}
4127
4128
4129/** \#UD - 06. */
4130VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4131{
4132 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4133}
4134
4135
4136/** \#NM - 07. */
4137VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4138{
4139 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4140}
4141
4142
4143/** \#TS(err) - 0a. */
4144VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4145{
4146 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4147}
4148
4149
4150/** \#TS(tr) - 0a. */
4151VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4152{
4153 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4154 pVCpu->cpum.GstCtx.tr.Sel, 0);
4155}
4156
4157
4158/** \#TS(0) - 0a. */
4159VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4160{
4161 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4162 0, 0);
4163}
4164
4165
4166/** \#TS(err) - 0a. */
4167VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4168{
4169 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4170 uSel & X86_SEL_MASK_OFF_RPL, 0);
4171}
4172
4173
4174/** \#NP(err) - 0b. */
4175VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4176{
4177 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4178}
4179
4180
4181/** \#NP(sel) - 0b. */
4182VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4183{
4184 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4185 uSel & ~X86_SEL_RPL, 0);
4186}
4187
4188
4189/** \#SS(seg) - 0c. */
4190VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4191{
4192 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4193 uSel & ~X86_SEL_RPL, 0);
4194}
4195
4196
4197/** \#SS(err) - 0c. */
4198VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4199{
4200 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4201}
4202
4203
4204/** \#GP(n) - 0d. */
4205VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4206{
4207 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4208}
4209
4210
4211/** \#GP(0) - 0d. */
4212VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4213{
4214 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4215}
4216
4217#ifdef IEM_WITH_SETJMP
4218/** \#GP(0) - 0d. */
4219DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4220{
4221 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4222}
4223#endif
4224
4225
4226/** \#GP(sel) - 0d. */
4227VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4228{
4229 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4230 Sel & ~X86_SEL_RPL, 0);
4231}
4232
4233
4234/** \#GP(0) - 0d. */
4235VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4236{
4237 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4238}
4239
4240
4241/** \#GP(sel) - 0d. */
4242VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4243{
4244 NOREF(iSegReg); NOREF(fAccess);
4245 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4246 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4247}
4248
4249#ifdef IEM_WITH_SETJMP
4250/** \#GP(sel) - 0d, longjmp. */
4251DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4252{
4253 NOREF(iSegReg); NOREF(fAccess);
4254 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4255 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4256}
4257#endif
4258
4259/** \#GP(sel) - 0d. */
4260VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4261{
4262 NOREF(Sel);
4263 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4264}
4265
4266#ifdef IEM_WITH_SETJMP
4267/** \#GP(sel) - 0d, longjmp. */
4268DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4269{
4270 NOREF(Sel);
4271 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4272}
4273#endif
4274
4275
4276/** \#GP(sel) - 0d. */
4277VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4278{
4279 NOREF(iSegReg); NOREF(fAccess);
4280 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4281}
4282
4283#ifdef IEM_WITH_SETJMP
4284/** \#GP(sel) - 0d, longjmp. */
4285DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4286{
4287 NOREF(iSegReg); NOREF(fAccess);
4288 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4289}
4290#endif
4291
4292
4293/** \#PF(n) - 0e. */
4294VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4295{
4296 uint16_t uErr;
4297 switch (rc)
4298 {
4299 case VERR_PAGE_NOT_PRESENT:
4300 case VERR_PAGE_TABLE_NOT_PRESENT:
4301 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4302 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4303 uErr = 0;
4304 break;
4305
4306 default:
4307 AssertMsgFailed(("%Rrc\n", rc));
4308 RT_FALL_THRU();
4309 case VERR_ACCESS_DENIED:
4310 uErr = X86_TRAP_PF_P;
4311 break;
4312
4313 /** @todo reserved */
4314 }
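         /* So far uErr only carries the P bit; the U/S, W/R and I/D bits are added below. */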
4315
4316 if (IEM_GET_CPL(pVCpu) == 3)
4317 uErr |= X86_TRAP_PF_US;
4318
4319 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4320 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4321 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4322 uErr |= X86_TRAP_PF_ID;
4323
4324#if 0 /* This is so much nonsense, really. Why was it done like that? */
4325 /* Note! RW access callers reporting a WRITE protection fault, will clear
4326 the READ flag before calling. So, read-modify-write accesses (RW)
4327 can safely be reported as READ faults. */
4328 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4329 uErr |= X86_TRAP_PF_RW;
4330#else
4331 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4332 {
4333 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4334 /// (regardless of outcome of the comparison in the latter case).
4335 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4336 uErr |= X86_TRAP_PF_RW;
4337 }
4338#endif
4339
4340 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4341 of the memory operand rather than at the start of it. (Not sure what
4342      happens if it crosses a page boundary.) The current heuristic for
4343      this is to report the #PF for the last byte if the access is more than
4344 64 bytes. This is probably not correct, but we can work that out later,
4345 main objective now is to get FXSAVE to work like for real hardware and
4346 make bs3-cpu-basic2 work. */
4347 if (cbAccess <= 64)
4348     { /* likely */ }
4349 else
4350 GCPtrWhere += cbAccess - 1;
4351
4352 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4353 uErr, GCPtrWhere);
4354}
4355
4356#ifdef IEM_WITH_SETJMP
4357/** \#PF(n) - 0e, longjmp. */
4358DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4359 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4360{
4361 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4362}
4363#endif
4364
4365
4366/** \#MF(0) - 10. */
4367VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4368{
4369 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4370 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4371
4372 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4373 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4374 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4375}
4376
4377
4378/** \#AC(0) - 11. */
4379VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4380{
4381 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4382}
4383
4384#ifdef IEM_WITH_SETJMP
4385/** \#AC(0) - 11, longjmp. */
4386DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4387{
4388 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4389}
4390#endif
4391
4392
4393/** \#XF(0)/\#XM(0) - 19. */
4394VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4395{
4396 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4397}
4398
4399
4400/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4401IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4402{
4403 NOREF(cbInstr);
4404 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4405}
4406
4407
4408/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4409IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4410{
4411 NOREF(cbInstr);
4412 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4413}
4414
4415
4416/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4417IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4418{
4419 NOREF(cbInstr);
4420 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4421}
4422
4423
4424/** @} */
4425
4426/** @name Common opcode decoders.
4427 * @{
4428 */
4429//#include <iprt/mem.h>
4430
4431/**
4432 * Used to add extra details about a stub case.
4433 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4434 */
4435void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4436{
4437#if defined(LOG_ENABLED) && defined(IN_RING3)
4438 PVM pVM = pVCpu->CTX_SUFF(pVM);
4439 char szRegs[4096];
4440 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4441 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4442 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4443 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4444 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4445 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4446 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4447 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4448 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4449 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4450 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4451 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4452 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4453 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4454 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4455 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4456 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4457 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4458 " efer=%016VR{efer}\n"
4459 " pat=%016VR{pat}\n"
4460 " sf_mask=%016VR{sf_mask}\n"
4461 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4462 " lstar=%016VR{lstar}\n"
4463 " star=%016VR{star} cstar=%016VR{cstar}\n"
4464 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4465 );
4466
4467 char szInstr[256];
4468 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4469 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4470 szInstr, sizeof(szInstr), NULL);
4471
4472 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4473#else
4474    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4475#endif
4476}
4477
4478/** @} */
4479
4480
4481
4482/** @name Register Access.
4483 * @{
4484 */
4485
4486/**
4487 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4488 *
4489 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4490 * segment limit.
4491 *
4492 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4493 * @param cbInstr Instruction size.
4494 * @param offNextInstr The offset of the next instruction.
4495 * @param enmEffOpSize Effective operand size.
4496 */
4497VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4498 IEMMODE enmEffOpSize) RT_NOEXCEPT
4499{
4500 switch (enmEffOpSize)
4501 {
4502 case IEMMODE_16BIT:
4503 {
4504 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4505 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4506 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4507 pVCpu->cpum.GstCtx.rip = uNewIp;
4508 else
4509 return iemRaiseGeneralProtectionFault0(pVCpu);
4510 break;
4511 }
4512
4513 case IEMMODE_32BIT:
4514 {
4515 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4516 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4517
4518 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4519 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4520 pVCpu->cpum.GstCtx.rip = uNewEip;
4521 else
4522 return iemRaiseGeneralProtectionFault0(pVCpu);
4523 break;
4524 }
4525
4526 case IEMMODE_64BIT:
4527 {
4528 Assert(IEM_IS_64BIT_CODE(pVCpu));
4529
4530 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4531 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4532 pVCpu->cpum.GstCtx.rip = uNewRip;
4533 else
4534 return iemRaiseGeneralProtectionFault0(pVCpu);
4535 break;
4536 }
4537
4538 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4539 }
4540
4541#ifndef IEM_WITH_CODE_TLB
4542 /* Flush the prefetch buffer. */
4543 pVCpu->iem.s.cbOpcode = cbInstr;
4544#endif
4545
4546 /*
4547 * Clear RF and finish the instruction (maybe raise #DB).
4548 */
4549 return iemRegFinishClearingRF(pVCpu);
4550}
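
/*
 * Illustrative sketch (editor's addition, not part of the IEM source): the
 * 16-bit case above relies on uint16_t wrap-around, so a backwards rel8 jump
 * taken near IP=0 lands high in the segment and only survives the CS limit
 * check if the limit actually covers that address.  All values below are
 * made up.
 */
#if 0 /* sketch only */
static bool iemSketchRel8JumpWraps(void)
{
    uint16_t const uIp     = 0x0002;                        /* current IP */
    uint8_t  const cbInstr = 2;                             /* assumed length of the jump */
    int8_t   const off     = -8;                            /* rel8 displacement */
    uint16_t const uNewIp  = uIp + cbInstr + (int16_t)off;  /* wraps to 0xfffc */
    uint32_t const cbLimit = UINT32_C(0xffff);              /* typical real-mode CS limit */
    return uNewIp <= cbLimit;                               /* true here; otherwise #GP(0) */
}
#endif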
4551
4552
4553/**
4554 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4555 *
4556 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4557 * segment limit.
4558 *
4559 * @returns Strict VBox status code.
4560 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4561 * @param cbInstr Instruction size.
4562 * @param offNextInstr The offset of the next instruction.
4563 */
4564VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4565{
4566 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4567
4568 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4569 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4570 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4571 pVCpu->cpum.GstCtx.rip = uNewIp;
4572 else
4573 return iemRaiseGeneralProtectionFault0(pVCpu);
4574
4575#ifndef IEM_WITH_CODE_TLB
4576 /* Flush the prefetch buffer. */
4577 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4578#endif
4579
4580 /*
4581 * Clear RF and finish the instruction (maybe raise #DB).
4582 */
4583 return iemRegFinishClearingRF(pVCpu);
4584}
4585
4586
4587/**
4588 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4589 *
4590 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4591 * segment limit.
4592 *
4593 * @returns Strict VBox status code.
4594 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4595 * @param cbInstr Instruction size.
4596 * @param offNextInstr The offset of the next instruction.
4597 * @param enmEffOpSize Effective operand size.
4598 */
4599VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4600 IEMMODE enmEffOpSize) RT_NOEXCEPT
4601{
4602 if (enmEffOpSize == IEMMODE_32BIT)
4603 {
4604 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4605
4606 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4607 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4608 pVCpu->cpum.GstCtx.rip = uNewEip;
4609 else
4610 return iemRaiseGeneralProtectionFault0(pVCpu);
4611 }
4612 else
4613 {
4614 Assert(enmEffOpSize == IEMMODE_64BIT);
4615
4616 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4617 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4618 pVCpu->cpum.GstCtx.rip = uNewRip;
4619 else
4620 return iemRaiseGeneralProtectionFault0(pVCpu);
4621 }
4622
4623#ifndef IEM_WITH_CODE_TLB
4624 /* Flush the prefetch buffer. */
4625 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4626#endif
4627
4628 /*
4629 * Clear RF and finish the instruction (maybe raise #DB).
4630 */
4631 return iemRegFinishClearingRF(pVCpu);
4632}
4633
4634
4635/**
4636 * Performs a near jump to the specified address.
4637 *
4638 * May raise a \#GP(0) if the new IP is outside the code segment limit.
4639 *
4640 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4641 * @param uNewIp The new IP value.
4642 */
4643VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
4644{
4645 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4646 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
4647 pVCpu->cpum.GstCtx.rip = uNewIp;
4648 else
4649 return iemRaiseGeneralProtectionFault0(pVCpu);
4650 /** @todo Test 16-bit jump in 64-bit mode. */
4651
4652#ifndef IEM_WITH_CODE_TLB
4653 /* Flush the prefetch buffer. */
4654 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4655#endif
4656
4657 /*
4658 * Clear RF and finish the instruction (maybe raise #DB).
4659 */
4660 return iemRegFinishClearingRF(pVCpu);
4661}
4662
4663
4664/**
4665 * Performs a near jump to the specified address.
4666 *
4667 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
4668 *
4669 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4670 * @param uNewEip The new EIP value.
4671 */
4672VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
4673{
4674 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4675 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4676
4677 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4678 pVCpu->cpum.GstCtx.rip = uNewEip;
4679 else
4680 return iemRaiseGeneralProtectionFault0(pVCpu);
4681
4682#ifndef IEM_WITH_CODE_TLB
4683 /* Flush the prefetch buffer. */
4684 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4685#endif
4686
4687 /*
4688 * Clear RF and finish the instruction (maybe raise #DB).
4689 */
4690 return iemRegFinishClearingRF(pVCpu);
4691}
4692
4693
4694/**
4695 * Performs a near jump to the specified address.
4696 *
4697 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4698 * segment limit.
4699 *
4700 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4701 * @param uNewRip The new RIP value.
4702 */
4703VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4704{
4705 Assert(IEM_IS_64BIT_CODE(pVCpu));
4706
4707 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4708 pVCpu->cpum.GstCtx.rip = uNewRip;
4709 else
4710 return iemRaiseGeneralProtectionFault0(pVCpu);
4711
4712#ifndef IEM_WITH_CODE_TLB
4713 /* Flush the prefetch buffer. */
4714 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4715#endif
4716
4717 /*
4718 * Clear RF and finish the instruction (maybe raise #DB).
4719 */
4720 return iemRegFinishClearingRF(pVCpu);
4721}
4722
4723/** @} */
4724
4725
4726/** @name FPU access and helpers.
4727 *
4728 * @{
4729 */
4730
4731/**
4732 * Updates the x87.DS and FPUDP registers.
4733 *
4734 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4735 * @param pFpuCtx The FPU context.
4736 * @param iEffSeg The effective segment register.
4737 * @param GCPtrEff The effective address relative to @a iEffSeg.
4738 */
4739DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4740{
4741 RTSEL sel;
4742 switch (iEffSeg)
4743 {
4744 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4745 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4746 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4747 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4748 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4749 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4750 default:
4751 AssertMsgFailed(("%d\n", iEffSeg));
4752 sel = pVCpu->cpum.GstCtx.ds.Sel;
4753 }
4754    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4755 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4756 {
4757 pFpuCtx->DS = 0;
4758 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4759 }
4760 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4761 {
4762 pFpuCtx->DS = sel;
4763 pFpuCtx->FPUDP = GCPtrEff;
4764 }
4765 else
4766 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4767}
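
/*
 * Illustrative sketch (editor's addition): in real/V86 mode the FPU data
 * pointer above is stored as a real-mode linear address, i.e. the effective
 * offset plus the selector shifted left by four, while the DS field is forced
 * to zero.  The numbers below are made up.
 */
#if 0 /* sketch only */
static uint32_t iemSketchRealModeFpuDp(void)
{
    uint16_t const sel    = 0x1234;                 /* hypothetical segment register value */
    uint32_t const offEff = 0x0010;                 /* hypothetical effective offset */
    return offEff + ((uint32_t)sel << 4);           /* 0x12340 + 0x10 = 0x12350 */
}
#endif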
4768
4769
4770/**
4771 * Rotates the stack registers in the push direction.
4772 *
4773 * @param pFpuCtx The FPU context.
4774 * @remarks This is a complete waste of time, but fxsave stores the registers in
4775 * stack order.
4776 */
4777DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4778{
4779 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4780 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4781 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4782 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4783 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4784 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4785 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4786 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4787 pFpuCtx->aRegs[0].r80 = r80Tmp;
4788}
4789
4790
4791/**
4792 * Rotates the stack registers in the pop direction.
4793 *
4794 * @param pFpuCtx The FPU context.
4795 * @remarks This is a complete waste of time, but fxsave stores the registers in
4796 * stack order.
4797 */
4798DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4799{
4800 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4801 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4802 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4803 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4804 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4805 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4806 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4807 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4808 pFpuCtx->aRegs[7].r80 = r80Tmp;
4809}
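
/*
 * Editor's note (illustrative, not from the original source): real hardware
 * keeps the x87 registers in place and only moves the 3-bit TOP field (push
 * is TOP-1 mod 8, pop is TOP+1 mod 8), so ST(i) is physical register
 * (TOP + i) & 7.  Because FXSAVE - and therefore pFpuCtx->aRegs[] - is laid
 * out in *stack* order with ST(0) first, the two rotation helpers above shift
 * the whole array instead of adjusting an index.
 */
#if 0 /* sketch of the register-number mapping only */
static unsigned iemSketchStRegToPhysical(unsigned iTop, unsigned iStReg)
{
    return (iTop + iStReg) & 7;     /* e.g. TOP=6, ST(3) -> physical register 1 */
}
#endif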
4810
4811
4812/**
4813 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4814 * exception prevents it.
4815 *
4816 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4817 * @param pResult The FPU operation result to push.
4818 * @param pFpuCtx The FPU context.
4819 */
4820static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4821{
4822 /* Update FSW and bail if there are pending exceptions afterwards. */
4823 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4824 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4825 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4826 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4827 {
4828 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FCW & X86_FSW_ES))
4829 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4830 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4831 pFpuCtx->FSW = fFsw;
4832 return;
4833 }
4834
4835 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4836 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4837 {
4838 /* All is fine, push the actual value. */
4839 pFpuCtx->FTW |= RT_BIT(iNewTop);
4840 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4841 }
4842 else if (pFpuCtx->FCW & X86_FCW_IM)
4843 {
4844 /* Masked stack overflow, push QNaN. */
4845 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4846 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4847 }
4848 else
4849 {
4850 /* Raise stack overflow, don't push anything. */
4851 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4852 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4853 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4854 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4855 return;
4856 }
4857
4858 fFsw &= ~X86_FSW_TOP_MASK;
4859 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4860 pFpuCtx->FSW = fFsw;
4861
4862 iemFpuRotateStackPush(pFpuCtx);
4863 RT_NOREF(pVCpu);
4864}
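
/*
 * Editor's note (illustrative): the "+ 7" in the iNewTop computation above is
 * simply TOP - 1 modulo 8 expressed without a signed subtraction, i.e. the
 * slot the push will land in; X86_FSW_TOP_SMASK keeps the result in 0..7.
 */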
4865
4866
4867/**
4868 * Stores a result in a FPU register and updates the FSW and FTW.
4869 *
4870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4871 * @param pFpuCtx The FPU context.
4872 * @param pResult The result to store.
4873 * @param iStReg Which FPU register to store it in.
4874 */
4875static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4876{
4877 Assert(iStReg < 8);
4878 uint16_t fNewFsw = pFpuCtx->FSW;
4879 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4880 fNewFsw &= ~X86_FSW_C_MASK;
4881 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4882 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4883 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4884 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4885 pFpuCtx->FSW = fNewFsw;
4886 pFpuCtx->FTW |= RT_BIT(iReg);
4887 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4888 RT_NOREF(pVCpu);
4889}
4890
4891
4892/**
4893 * Only updates the FPU status word (FSW) with the result of the current
4894 * instruction.
4895 *
4896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4897 * @param pFpuCtx The FPU context.
4898 * @param u16FSW The FSW output of the current instruction.
4899 */
4900static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4901{
4902 uint16_t fNewFsw = pFpuCtx->FSW;
4903 fNewFsw &= ~X86_FSW_C_MASK;
4904 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4905 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4906        Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4907 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4908 pFpuCtx->FSW = fNewFsw;
4909 RT_NOREF(pVCpu);
4910}
4911
4912
4913/**
4914 * Pops one item off the FPU stack if no pending exception prevents it.
4915 *
4916 * @param pFpuCtx The FPU context.
4917 */
4918static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4919{
4920 /* Check pending exceptions. */
4921 uint16_t uFSW = pFpuCtx->FSW;
4922 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4923 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4924 return;
4925
4926    /* TOP++ (pop). */
4927 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4928 uFSW &= ~X86_FSW_TOP_MASK;
4929 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4930 pFpuCtx->FSW = uFSW;
4931
4932 /* Mark the previous ST0 as empty. */
4933 iOldTop >>= X86_FSW_TOP_SHIFT;
4934 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4935
4936 /* Rotate the registers. */
4937 iemFpuRotateStackPop(pFpuCtx);
4938}
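
/*
 * Editor's note (illustrative): the "+ (9 << X86_FSW_TOP_SHIFT)" above is
 * TOP + 1 modulo 8 performed directly inside the FSW TOP field (9 and 1 are
 * the same modulo 8), matching the hardware behaviour of a pop incrementing
 * TOP while the freed slot is marked empty in FTW.
 */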
4939
4940
4941/**
4942 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4943 *
4944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4945 * @param pResult The FPU operation result to push.
4946 * @param uFpuOpcode The FPU opcode value.
4947 */
4948void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4949{
4950 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4951 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4952 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4953}
4954
4955
4956/**
4957 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4958 * and sets FPUDP and FPUDS.
4959 *
4960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4961 * @param pResult The FPU operation result to push.
4962 * @param iEffSeg The effective segment register.
4963 * @param GCPtrEff The effective address relative to @a iEffSeg.
4964 * @param uFpuOpcode The FPU opcode value.
4965 */
4966void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
4967 uint16_t uFpuOpcode) RT_NOEXCEPT
4968{
4969 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4970 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4971 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4972 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4973}
4974
4975
4976/**
4977 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
4978 * unless a pending exception prevents it.
4979 *
4980 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4981 * @param pResult The FPU operation result to store and push.
4982 * @param uFpuOpcode The FPU opcode value.
4983 */
4984void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4985{
4986 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4987 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4988
4989 /* Update FSW and bail if there are pending exceptions afterwards. */
4990 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4991 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4992 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4993 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4994 {
4995 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4996 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
4997 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4998 pFpuCtx->FSW = fFsw;
4999 return;
5000 }
5001
5002 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5003 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5004 {
5005 /* All is fine, push the actual value. */
5006 pFpuCtx->FTW |= RT_BIT(iNewTop);
5007 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5008 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5009 }
5010 else if (pFpuCtx->FCW & X86_FCW_IM)
5011 {
5012 /* Masked stack overflow, push QNaN. */
5013 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5014 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5015 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5016 }
5017 else
5018 {
5019 /* Raise stack overflow, don't push anything. */
5020 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5021 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5022 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5023 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5024 return;
5025 }
5026
5027 fFsw &= ~X86_FSW_TOP_MASK;
5028 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5029 pFpuCtx->FSW = fFsw;
5030
5031 iemFpuRotateStackPush(pFpuCtx);
5032}
5033
5034
5035/**
5036 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5037 * FOP.
5038 *
5039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5040 * @param pResult The result to store.
5041 * @param iStReg Which FPU register to store it in.
5042 * @param uFpuOpcode The FPU opcode value.
5043 */
5044void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5045{
5046 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5047 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5048 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5049}
5050
5051
5052/**
5053 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5054 * FOP, and then pops the stack.
5055 *
5056 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5057 * @param pResult The result to store.
5058 * @param iStReg Which FPU register to store it in.
5059 * @param uFpuOpcode The FPU opcode value.
5060 */
5061void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5062{
5063 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5064 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5065 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5066 iemFpuMaybePopOne(pFpuCtx);
5067}
5068
5069
5070/**
5071 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5072 * FPUDP, and FPUDS.
5073 *
5074 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5075 * @param pResult The result to store.
5076 * @param iStReg Which FPU register to store it in.
5077 * @param iEffSeg The effective memory operand selector register.
5078 * @param GCPtrEff The effective memory operand offset.
5079 * @param uFpuOpcode The FPU opcode value.
5080 */
5081void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5082 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5083{
5084 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5085 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5086 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5087 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5088}
5089
5090
5091/**
5092 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5093 * FPUDP, and FPUDS, and then pops the stack.
5094 *
5095 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5096 * @param pResult The result to store.
5097 * @param iStReg Which FPU register to store it in.
5098 * @param iEffSeg The effective memory operand selector register.
5099 * @param GCPtrEff The effective memory operand offset.
5100 * @param uFpuOpcode The FPU opcode value.
5101 */
5102void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5103 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5104{
5105 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5106 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5107 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5108 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5109 iemFpuMaybePopOne(pFpuCtx);
5110}
5111
5112
5113/**
5114 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5115 *
5116 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5117 * @param uFpuOpcode The FPU opcode value.
5118 */
5119void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5120{
5121 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5122 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5123}
5124
5125
5126/**
5127 * Updates the FSW, FOP, FPUIP, and FPUCS.
5128 *
5129 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5130 * @param u16FSW The FSW from the current instruction.
5131 * @param uFpuOpcode The FPU opcode value.
5132 */
5133void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5134{
5135 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5136 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5137 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5138}
5139
5140
5141/**
5142 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5143 *
5144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5145 * @param u16FSW The FSW from the current instruction.
5146 * @param uFpuOpcode The FPU opcode value.
5147 */
5148void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5149{
5150 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5151 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5152 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5153 iemFpuMaybePopOne(pFpuCtx);
5154}
5155
5156
5157/**
5158 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5159 *
5160 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5161 * @param u16FSW The FSW from the current instruction.
5162 * @param iEffSeg The effective memory operand selector register.
5163 * @param GCPtrEff The effective memory operand offset.
5164 * @param uFpuOpcode The FPU opcode value.
5165 */
5166void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5167{
5168 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5169 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5170 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5171 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5172}
5173
5174
5175/**
5176 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5177 *
5178 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5179 * @param u16FSW The FSW from the current instruction.
5180 * @param uFpuOpcode The FPU opcode value.
5181 */
5182void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5183{
5184 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5185 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5186 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5187 iemFpuMaybePopOne(pFpuCtx);
5188 iemFpuMaybePopOne(pFpuCtx);
5189}
5190
5191
5192/**
5193 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5194 *
5195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5196 * @param u16FSW The FSW from the current instruction.
5197 * @param iEffSeg The effective memory operand selector register.
5198 * @param GCPtrEff The effective memory operand offset.
5199 * @param uFpuOpcode The FPU opcode value.
5200 */
5201void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5202 uint16_t uFpuOpcode) RT_NOEXCEPT
5203{
5204 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5205 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5206 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5207 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5208 iemFpuMaybePopOne(pFpuCtx);
5209}
5210
5211
5212/**
5213 * Worker routine for raising an FPU stack underflow exception.
5214 *
5215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5216 * @param pFpuCtx The FPU context.
5217 * @param iStReg The stack register being accessed.
5218 */
5219static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5220{
5221 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5222 if (pFpuCtx->FCW & X86_FCW_IM)
5223 {
5224 /* Masked underflow. */
5225 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5226 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5227 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5228 if (iStReg != UINT8_MAX)
5229 {
5230 pFpuCtx->FTW |= RT_BIT(iReg);
5231 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5232 }
5233 }
5234 else
5235 {
5236 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5237 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5238 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5239 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5240 }
5241 RT_NOREF(pVCpu);
5242}
5243
5244
5245/**
5246 * Raises a FPU stack underflow exception.
5247 *
5248 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5249 * @param iStReg The destination register that should be loaded
5250 * with QNaN if \#IS is not masked. Specify
5251 * UINT8_MAX if none (like for fcom).
5252 * @param uFpuOpcode The FPU opcode value.
5253 */
5254void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5255{
5256 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5257 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5258 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5259}
5260
5261
5262void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5263{
5264 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5265 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5266 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5267 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5268}
5269
5270
5271void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5272{
5273 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5274 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5275 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5276 iemFpuMaybePopOne(pFpuCtx);
5277}
5278
5279
5280void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5281 uint16_t uFpuOpcode) RT_NOEXCEPT
5282{
5283 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5284 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5285 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5286 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5287 iemFpuMaybePopOne(pFpuCtx);
5288}
5289
5290
5291void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5292{
5293 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5294 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5295 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5296 iemFpuMaybePopOne(pFpuCtx);
5297 iemFpuMaybePopOne(pFpuCtx);
5298}
5299
5300
5301void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5302{
5303 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5304 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5305
5306 if (pFpuCtx->FCW & X86_FCW_IM)
5307 {
5308        /* Masked underflow - Push QNaN. */
5309 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5310 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5311 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5312 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5313 pFpuCtx->FTW |= RT_BIT(iNewTop);
5314 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5315 iemFpuRotateStackPush(pFpuCtx);
5316 }
5317 else
5318 {
5319 /* Exception pending - don't change TOP or the register stack. */
5320 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5321 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5322 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5323 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5324 }
5325}
5326
5327
5328void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5329{
5330 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5331 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5332
5333 if (pFpuCtx->FCW & X86_FCW_IM)
5334 {
5335        /* Masked underflow - Push QNaN. */
5336 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5337 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5338 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5339 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5340 pFpuCtx->FTW |= RT_BIT(iNewTop);
5341 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5342 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5343 iemFpuRotateStackPush(pFpuCtx);
5344 }
5345 else
5346 {
5347 /* Exception pending - don't change TOP or the register stack. */
5348 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5349 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5350 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5351 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5352 }
5353}
5354
5355
5356/**
5357 * Worker routine for raising an FPU stack overflow exception on a push.
5358 *
5359 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5360 * @param pFpuCtx The FPU context.
5361 */
5362static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5363{
5364 if (pFpuCtx->FCW & X86_FCW_IM)
5365 {
5366 /* Masked overflow. */
5367 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5368 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5369 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5370 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5371 pFpuCtx->FTW |= RT_BIT(iNewTop);
5372 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5373 iemFpuRotateStackPush(pFpuCtx);
5374 }
5375 else
5376 {
5377 /* Exception pending - don't change TOP or the register stack. */
5378 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5379 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5380 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5381 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5382 }
5383 RT_NOREF(pVCpu);
5384}
5385
5386
5387/**
5388 * Raises a FPU stack overflow exception on a push.
5389 *
5390 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5391 * @param uFpuOpcode The FPU opcode value.
5392 */
5393void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5394{
5395 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5396 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5397 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5398}
5399
5400
5401/**
5402 * Raises a FPU stack overflow exception on a push with a memory operand.
5403 *
5404 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5405 * @param iEffSeg The effective memory operand selector register.
5406 * @param GCPtrEff The effective memory operand offset.
5407 * @param uFpuOpcode The FPU opcode value.
5408 */
5409void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5410{
5411 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5412 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5413 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5414 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5415}
5416
5417/** @} */
5418
5419
5420/** @name SSE+AVX SIMD access and helpers.
5421 *
5422 * @{
5423 */
5424/**
5425 * Stores a result in a SIMD XMM register, updates the MXCSR.
5426 *
5427 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5428 * @param pResult The result to store.
5429 * @param iXmmReg Which SIMD XMM register to store the result in.
5430 */
5431void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5432{
5433 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5434 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5435
5436 /* The result is only updated if there is no unmasked exception pending. */
5437 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5438 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5439 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5440}
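
/*
 * Illustrative sketch (editor's addition): the guard above commits the XMM
 * result only when every newly raised exception flag has its mask bit set.
 * In MXCSR the exception flags live in bits 0..5 and the corresponding mask
 * bits in bits 7..12, so shifting the masks down lines them up with the
 * flags.  Constant names below mirror the ones used above; the helper name is
 * made up.
 */
#if 0 /* sketch only */
static bool iemSketchSseResultIsCommitted(uint32_t fMxcsr)
{
    uint32_t const fXcpts = fMxcsr & X86_MXCSR_XCPT_FLAGS;                               /* IE..PE */
    uint32_t const fMasks = (fMxcsr & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT; /* IM..PM */
    return (~fMasks & fXcpts) == 0;         /* no unmasked exception pending -> store the result */
}
#endif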
5441
5442
5443/**
5444 * Updates the MXCSR.
5445 *
5446 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5447 * @param fMxcsr The new MXCSR value.
5448 */
5449void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5450{
5451 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5452 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5453}
5454/** @} */
5455
5456
5457/** @name Memory access.
5458 *
5459 * @{
5460 */
5461
5462
5463/**
5464 * Updates the IEMCPU::cbWritten counter if applicable.
5465 *
5466 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5467 * @param fAccess The access being accounted for.
5468 * @param cbMem The access size.
5469 */
5470DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5471{
5472 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5473 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5474 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5475}
5476
5477
5478/**
5479 * Applies the segment limit, base and attributes.
5480 *
5481 * This may raise a \#GP or \#SS.
5482 *
5483 * @returns VBox strict status code.
5484 *
5485 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5486 * @param fAccess The kind of access which is being performed.
5487 * @param iSegReg The index of the segment register to apply.
5488 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5489 * TSS, ++).
5490 * @param cbMem The access size.
5491 * @param pGCPtrMem Pointer to the guest memory address to apply
5492 * segmentation to. Input and output parameter.
5493 */
5494VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5495{
5496 if (iSegReg == UINT8_MAX)
5497 return VINF_SUCCESS;
5498
5499 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5500 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5501 switch (IEM_GET_CPU_MODE(pVCpu))
5502 {
5503 case IEMMODE_16BIT:
5504 case IEMMODE_32BIT:
5505 {
5506 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5507 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5508
5509 if ( pSel->Attr.n.u1Present
5510 && !pSel->Attr.n.u1Unusable)
5511 {
5512 Assert(pSel->Attr.n.u1DescType);
5513 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5514 {
5515 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5516 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5517 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5518
5519 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5520 {
5521 /** @todo CPL check. */
5522 }
5523
5524 /*
5525 * There are two kinds of data selectors, normal and expand down.
5526 */
5527 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5528 {
5529 if ( GCPtrFirst32 > pSel->u32Limit
5530 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5531 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5532 }
5533 else
5534 {
5535 /*
5536 * The upper boundary is defined by the B bit, not the G bit!
5537 */
5538 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5539 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5540 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5541 }
5542 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5543 }
5544 else
5545 {
5546 /*
5547                     * A code selector can usually be used to read through; writing is
5548                     * only permitted in real and V8086 mode.
5549 */
5550 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5551 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5552 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5553 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5554 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5555
5556 if ( GCPtrFirst32 > pSel->u32Limit
5557 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5558 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5559
5560 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5561 {
5562 /** @todo CPL check. */
5563 }
5564
5565 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5566 }
5567 }
5568 else
5569 return iemRaiseGeneralProtectionFault0(pVCpu);
5570 return VINF_SUCCESS;
5571 }
5572
5573 case IEMMODE_64BIT:
5574 {
5575 RTGCPTR GCPtrMem = *pGCPtrMem;
5576 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5577 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5578
5579 Assert(cbMem >= 1);
5580 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5581 return VINF_SUCCESS;
5582 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5583 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5584 return iemRaiseGeneralProtectionFault0(pVCpu);
5585 }
5586
5587 default:
5588 AssertFailedReturn(VERR_IEM_IPE_7);
5589 }
5590}
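
/*
 * Illustrative sketch (editor's addition): for an expand-down data segment the
 * limit is a *lower* bound - valid offsets run from limit+1 up to 0xffff or
 * 0xffffffff depending on the D/B bit - which is what the second branch of the
 * 16/32-bit case above encodes.  Made-up numbers, hypothetical helper name:
 */
#if 0 /* sketch only */
static bool iemSketchExpandDownCheck(void)
{
    uint32_t const uLimit     = 0x0fff;         /* hypothetical segment limit */
    bool     const fDefBig    = true;           /* B bit set -> upper bound is 0xffffffff */
    uint32_t const GCPtrFirst = 0x2000;         /* first byte of the access */
    uint32_t const GCPtrLast  = 0x2003;         /* last byte of a 4 byte access */
    uint32_t const uUpper     = fDefBig ? UINT32_MAX : UINT32_C(0xffff);
    return GCPtrFirst >= uLimit + 1 && GCPtrLast <= uUpper;    /* true -> no #GP/#SS */
}
#endif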
5591
5592
5593/**
5594 * Translates a virtual address to a physical address and checks if we
5595 * can access the page as specified.
5596 *
5597 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5598 * @param GCPtrMem The virtual address.
5599 * @param cbAccess The access size, for raising \#PF correctly for
5600 * FXSAVE and such.
5601 * @param fAccess The intended access.
5602 * @param pGCPhysMem Where to return the physical address.
5603 */
5604VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5605 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5606{
5607 /** @todo Need a different PGM interface here. We're currently using
5608     * generic / REM interfaces. This won't cut it for R0. */
5609 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5610 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5611 * here. */
5612 PGMPTWALK Walk;
5613 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5614 if (RT_FAILURE(rc))
5615 {
5616 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5617 /** @todo Check unassigned memory in unpaged mode. */
5618 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5619#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5620 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5621 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5622#endif
5623 *pGCPhysMem = NIL_RTGCPHYS;
5624 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5625 }
5626
5627 /* If the page is writable and does not have the no-exec bit set, all
5628 access is allowed. Otherwise we'll have to check more carefully... */
5629 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5630 {
5631 /* Write to read only memory? */
5632 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5633 && !(Walk.fEffective & X86_PTE_RW)
5634 && ( ( IEM_GET_CPL(pVCpu) == 3
5635 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5636 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5637 {
5638 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5639 *pGCPhysMem = NIL_RTGCPHYS;
5640#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5641 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5642 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5643#endif
5644 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5645 }
5646
5647 /* Kernel memory accessed by userland? */
5648 if ( !(Walk.fEffective & X86_PTE_US)
5649 && IEM_GET_CPL(pVCpu) == 3
5650 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5651 {
5652 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5653 *pGCPhysMem = NIL_RTGCPHYS;
5654#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5655 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5656 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5657#endif
5658 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5659 }
5660
5661 /* Executing non-executable memory? */
5662 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5663 && (Walk.fEffective & X86_PTE_PAE_NX)
5664 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5665 {
5666 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5667 *pGCPhysMem = NIL_RTGCPHYS;
5668#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5669 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5670 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5671#endif
5672 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5673 VERR_ACCESS_DENIED);
5674 }
5675 }
5676
5677 /*
5678 * Set the dirty / access flags.
5679     * ASSUMES this is set when the address is translated rather than on commit...
5680 */
5681 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5682 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5683 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5684 {
5685 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5686 AssertRC(rc2);
5687 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5688 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5689 }
5690
5691 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5692 *pGCPhysMem = GCPhys;
5693 return VINF_SUCCESS;
5694}
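
/*
 * Illustrative sketch (editor's addition): the three checks above boil down to
 * the usual paging rules - supervisor writes honour CR0.WP, user-mode accesses
 * never touch supervisor pages, and NX only bites when EFER.NXE is set.  The
 * helper below is a simplified restatement with hypothetical parameters
 * (fUser meaning "CPL 3 and not a system access"), not a replacement for the
 * real walk.
 */
#if 0 /* sketch only */
static bool iemSketchPagePermits(uint32_t fPte, bool fWrite, bool fExec, bool fUser, bool fWp, bool fNxe)
{
    if (fWrite && !(fPte & X86_PTE_RW) && (fUser || fWp))
        return false;                           /* write to a read-only page */
    if (fUser && !(fPte & X86_PTE_US))
        return false;                           /* user-mode access to a supervisor page */
    if (fExec && (fPte & X86_PTE_PAE_NX) && fNxe)
        return false;                           /* instruction fetch from a no-execute page */
    return true;
}
#endif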
5695
5696
5697/**
5698 * Looks up a memory mapping entry.
5699 *
5700 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5701 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5702 * @param pvMem The memory address.
5703 * @param fAccess The access to.
5704 */
5705DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5706{
5707 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5708 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5709 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5710 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5711 return 0;
5712 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5713 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5714 return 1;
5715 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5716 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5717 return 2;
5718 return VERR_NOT_FOUND;
5719}
5720
5721
5722/**
5723 * Finds a free memory mapping entry when using iNextMapping doesn't work.
5724 *
5725 * @returns Memory mapping index, 1024 on failure.
5726 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5727 */
5728static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5729{
5730 /*
5731 * The easy case.
5732 */
5733 if (pVCpu->iem.s.cActiveMappings == 0)
5734 {
5735 pVCpu->iem.s.iNextMapping = 1;
5736 return 0;
5737 }
5738
5739 /* There should be enough mappings for all instructions. */
5740 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5741
5742 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5743 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5744 return i;
5745
5746 AssertFailedReturn(1024);
5747}
5748
5749
5750/**
5751 * Commits a bounce buffer that needs writing back and unmaps it.
5752 *
5753 * @returns Strict VBox status code.
5754 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5755 * @param iMemMap The index of the buffer to commit.
5756 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5757 * Always false in ring-3, obviously.
5758 */
5759static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5760{
5761 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5762 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5763#ifdef IN_RING3
5764 Assert(!fPostponeFail);
5765 RT_NOREF_PV(fPostponeFail);
5766#endif
5767
5768 /*
5769 * Do the writing.
5770 */
5771 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5772 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5773 {
5774 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5775 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5776 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5777 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5778 {
5779 /*
5780 * Carefully and efficiently dealing with access handler return
5781             * codes makes this a little bloated.
5782 */
5783 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5784 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5785 pbBuf,
5786 cbFirst,
5787 PGMACCESSORIGIN_IEM);
5788 if (rcStrict == VINF_SUCCESS)
5789 {
5790 if (cbSecond)
5791 {
5792 rcStrict = PGMPhysWrite(pVM,
5793 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5794 pbBuf + cbFirst,
5795 cbSecond,
5796 PGMACCESSORIGIN_IEM);
5797 if (rcStrict == VINF_SUCCESS)
5798 { /* nothing */ }
5799 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5800 {
5801 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5802 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5803 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5804 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5805 }
5806#ifndef IN_RING3
5807 else if (fPostponeFail)
5808 {
5809 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5810 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5811 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5812 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5813 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5814 return iemSetPassUpStatus(pVCpu, rcStrict);
5815 }
5816#endif
5817 else
5818 {
5819 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5820 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5821 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5822 return rcStrict;
5823 }
5824 }
5825 }
5826 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5827 {
5828 if (!cbSecond)
5829 {
5830 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5831 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5832 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5833 }
5834 else
5835 {
5836 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5837 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5838 pbBuf + cbFirst,
5839 cbSecond,
5840 PGMACCESSORIGIN_IEM);
5841 if (rcStrict2 == VINF_SUCCESS)
5842 {
5843 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5844 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5845 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5846 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5847 }
5848 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5849 {
5850 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5851 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5852 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5853 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5854 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5855 }
5856#ifndef IN_RING3
5857 else if (fPostponeFail)
5858 {
5859 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5860 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5861 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5862 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5863 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5864 return iemSetPassUpStatus(pVCpu, rcStrict);
5865 }
5866#endif
5867 else
5868 {
5869 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5870 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5871 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5872 return rcStrict2;
5873 }
5874 }
5875 }
5876#ifndef IN_RING3
5877 else if (fPostponeFail)
5878 {
5879 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5880 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5881 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5882 if (!cbSecond)
5883 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5884 else
5885 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5886 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5887 return iemSetPassUpStatus(pVCpu, rcStrict);
5888 }
5889#endif
5890 else
5891 {
5892 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5893 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5894 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5895 return rcStrict;
5896 }
5897 }
5898 else
5899 {
5900 /*
5901 * No access handlers, much simpler.
5902 */
5903 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5904 if (RT_SUCCESS(rc))
5905 {
5906 if (cbSecond)
5907 {
5908 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5909 if (RT_SUCCESS(rc))
5910 { /* likely */ }
5911 else
5912 {
5913 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5914 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5915 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5916 return rc;
5917 }
5918 }
5919 }
5920 else
5921 {
5922 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5923 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5924 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5925 return rc;
5926 }
5927 }
5928 }
5929
5930#if defined(IEM_LOG_MEMORY_WRITES)
5931 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5932 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5933 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5934 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5935 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5936 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5937
5938 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5939 g_cbIemWrote = cbWrote;
5940 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5941#endif
5942
5943 /*
5944 * Free the mapping entry.
5945 */
5946 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5947 Assert(pVCpu->iem.s.cActiveMappings != 0);
5948 pVCpu->iem.s.cActiveMappings--;
5949 return VINF_SUCCESS;
5950}
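
/*
 * Editor's note (illustrative): a bounce-buffered write can span two guest
 * pages, hence the first/second halves handled above.  Outside ring-3 a
 * failing PGMPhysWrite can be postponed: the IEM_ACCESS_PENDING_R3_WRITE_*
 * flags together with the VMCPU_FF_IEM force action make ring-3 replay the
 * commit later instead of failing the instruction in a context that cannot
 * deal with it.
 */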
5951
5952
5953/**
5954 * iemMemMap worker that deals with a request crossing pages.
5955 */
5956static VBOXSTRICTRC
5957iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5958{
5959 Assert(cbMem <= GUEST_PAGE_SIZE);
5960
5961 /*
5962 * Do the address translations.
5963 */
5964 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
5965 RTGCPHYS GCPhysFirst;
5966 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
5967 if (rcStrict != VINF_SUCCESS)
5968 return rcStrict;
5969 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
5970
5971 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
5972 RTGCPHYS GCPhysSecond;
5973 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5974 cbSecondPage, fAccess, &GCPhysSecond);
5975 if (rcStrict != VINF_SUCCESS)
5976 return rcStrict;
5977 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
5978 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
5979
5980 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5981
5982 /*
5983 * Read in the current memory content if it's a read, execute or partial
5984 * write access.
5985 */
5986 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5987
5988 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5989 {
5990 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5991 {
5992 /*
5993 * Must carefully deal with access handler status codes here,
5994             * which makes the code a bit bloated.
5995 */
5996 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5997 if (rcStrict == VINF_SUCCESS)
5998 {
5999 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6000 if (rcStrict == VINF_SUCCESS)
6001 { /*likely */ }
6002 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6003 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6004 else
6005 {
6006                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6007 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6008 return rcStrict;
6009 }
6010 }
6011 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6012 {
6013 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6014 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6015 {
6016 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6017 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6018 }
6019 else
6020 {
6021                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6022                         GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6023 return rcStrict2;
6024 }
6025 }
6026 else
6027 {
6028                Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6029 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6030 return rcStrict;
6031 }
6032 }
6033 else
6034 {
6035 /*
6036             * No informational status codes here, much more straightforward.
6037 */
6038 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6039 if (RT_SUCCESS(rc))
6040 {
6041 Assert(rc == VINF_SUCCESS);
6042 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6043 if (RT_SUCCESS(rc))
6044 Assert(rc == VINF_SUCCESS);
6045 else
6046 {
6047                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6048 return rc;
6049 }
6050 }
6051 else
6052 {
6053                Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6054 return rc;
6055 }
6056 }
6057 }
6058#ifdef VBOX_STRICT
6059 else
6060 memset(pbBuf, 0xcc, cbMem);
6061 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6062 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6063#endif
6064 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6065
6066 /*
6067 * Commit the bounce buffer entry.
6068 */
6069 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6070 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6071 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6072 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6073 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6074 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6075 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6076 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6077 pVCpu->iem.s.cActiveMappings++;
6078
6079 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6080 *ppvMem = pbBuf;
6081 return VINF_SUCCESS;
6082}
6083
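/*
 * Worked example (illustrative only, not used by any code): for an 8 byte
 * access at guest linear address 0x7ffe and a GUEST_PAGE_SIZE of 0x1000, the
 * split above works out as
 *      cbFirstPage  = 0x1000 - (0x7ffe & 0xfff) = 2
 *      cbSecondPage = 8 - 2                     = 6
 * i.e. bytes 0 and 1 come from the first translated page and bytes 2 thru 7
 * from the page following it linearly, while the caller sees one flat buffer.
 */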
6084
6085/**
6086 * iemMemMap worker that deals with iemMemPageMap failures.
6087 */
6088static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6089 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6090{
6091 /*
6092 * Filter out conditions we can handle and the ones which shouldn't happen.
6093 */
6094 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6095 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6096 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6097 {
6098 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6099 return rcMap;
6100 }
6101 pVCpu->iem.s.cPotentialExits++;
6102
6103 /*
6104 * Read in the current memory content if it's a read, execute or partial
6105 * write access.
6106 */
6107 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6108 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6109 {
6110 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6111 memset(pbBuf, 0xff, cbMem);
6112 else
6113 {
6114 int rc;
6115 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6116 {
6117 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6118 if (rcStrict == VINF_SUCCESS)
6119 { /* nothing */ }
6120 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6121 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6122 else
6123 {
6124 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6125 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6126 return rcStrict;
6127 }
6128 }
6129 else
6130 {
6131 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6132 if (RT_SUCCESS(rc))
6133 { /* likely */ }
6134 else
6135 {
6136                    Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6137 GCPhysFirst, rc));
6138 return rc;
6139 }
6140 }
6141 }
6142 }
6143#ifdef VBOX_STRICT
6144 else
6145 memset(pbBuf, 0xcc, cbMem);
6146#endif
6147#ifdef VBOX_STRICT
6148 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6149 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6150#endif
6151
6152 /*
6153 * Commit the bounce buffer entry.
6154 */
6155 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6156 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6157 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6158 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6159 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6160 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6161 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6162 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6163 pVCpu->iem.s.cActiveMappings++;
6164
6165 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6166 *ppvMem = pbBuf;
6167 return VINF_SUCCESS;
6168}
6169
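/*
 * Rough summary (illustrative, based on the checks above): the three rcMap
 * values this fallback accepts correspond to a page with a write handler
 * (VERR_PGM_PHYS_TLB_CATCH_WRITE), a page where all accesses are handled,
 * e.g. MMIO (VERR_PGM_PHYS_TLB_CATCH_ALL), and unassigned address space
 * (VERR_PGM_PHYS_TLB_UNASSIGNED), where reads come back as 0xff as the
 * memset above shows.  Everything else is treated as a genuine failure.
 */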
6170
6171
6172/**
6173 * Maps the specified guest memory for the given kind of access.
6174 *
6175 * This may be using bounce buffering of the memory if it's crossing a page
6176 * boundary or if there is an access handler installed for any of it. Because
6177 * of lock prefix guarantees, we're in for some extra clutter when this
6178 * happens.
6179 *
6180 * This may raise a \#GP, \#SS, \#PF or \#AC.
6181 *
6182 * @returns VBox strict status code.
6183 *
6184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6185 * @param ppvMem Where to return the pointer to the mapped memory.
6186 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6187 * 8, 12, 16, 32 or 512. When used by string operations
6188 * it can be up to a page.
6189 * @param iSegReg The index of the segment register to use for this
6190 * access. The base and limits are checked. Use UINT8_MAX
6191 * to indicate that no segmentation is required (for IDT,
6192 * GDT and LDT accesses).
6193 * @param GCPtrMem The address of the guest memory.
6194 * @param fAccess How the memory is being accessed. The
6195 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
6196 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
6197 * when raising exceptions.
6198 * @param uAlignCtl Alignment control:
6199 * - Bits 15:0 is the alignment mask.
6200 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6201 * IEM_MEMMAP_F_ALIGN_SSE, and
6202 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6203 * Pass zero to skip alignment.
6204 */
6205VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6206 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6207{
6208 /*
6209 * Check the input and figure out which mapping entry to use.
6210 */
6211 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6212 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6213 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6214    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6215 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6216
6217 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6218 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6219 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6220 {
6221 iMemMap = iemMemMapFindFree(pVCpu);
6222 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6223 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6224 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6225 pVCpu->iem.s.aMemMappings[2].fAccess),
6226 VERR_IEM_IPE_9);
6227 }
6228
6229 /*
6230 * Map the memory, checking that we can actually access it. If something
6231 * slightly complicated happens, fall back on bounce buffering.
6232 */
6233 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6234 if (rcStrict == VINF_SUCCESS)
6235 { /* likely */ }
6236 else
6237 return rcStrict;
6238
6239 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6240 { /* likely */ }
6241 else
6242 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6243
6244 /*
6245 * Alignment check.
6246 */
6247 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6248 { /* likelyish */ }
6249 else
6250 {
6251 /* Misaligned access. */
6252 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6253 {
6254 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6255 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6256 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6257 {
6258 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6259
6260 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6261 return iemRaiseAlignmentCheckException(pVCpu);
6262 }
6263 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6264 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6265 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6266 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6267 * that's what FXSAVE does on a 10980xe. */
6268 && iemMemAreAlignmentChecksEnabled(pVCpu))
6269 return iemRaiseAlignmentCheckException(pVCpu);
6270 else
6271 return iemRaiseGeneralProtectionFault0(pVCpu);
6272 }
6273 }
6274
6275#ifdef IEM_WITH_DATA_TLB
6276 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6277
6278 /*
6279 * Get the TLB entry for this page.
6280 */
6281 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6282 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6283 if (pTlbe->uTag == uTag)
6284 {
6285# ifdef VBOX_WITH_STATISTICS
6286 pVCpu->iem.s.DataTlb.cTlbHits++;
6287# endif
6288 }
6289 else
6290 {
6291 pVCpu->iem.s.DataTlb.cTlbMisses++;
6292 PGMPTWALK Walk;
6293 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6294 if (RT_FAILURE(rc))
6295 {
6296 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6297# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6298 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6299 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6300# endif
6301 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6302 }
6303
6304 Assert(Walk.fSucceeded);
6305 pTlbe->uTag = uTag;
6306 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6307 pTlbe->GCPhys = Walk.GCPhys;
6308 pTlbe->pbMappingR3 = NULL;
6309 }
6310
6311 /*
6312 * Check TLB page table level access flags.
6313 */
6314 /* If the page is either supervisor only or non-writable, we need to do
6315 more careful access checks. */
6316 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6317 {
6318 /* Write to read only memory? */
6319 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6320 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6321 && ( ( IEM_GET_CPL(pVCpu) == 3
6322 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6323 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6324 {
6325 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6326# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6327 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6328 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6329# endif
6330 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6331 }
6332
6333 /* Kernel memory accessed by userland? */
6334 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6335 && IEM_GET_CPL(pVCpu) == 3
6336 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6337 {
6338 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6339# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6340 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6341 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6342# endif
6343 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6344 }
6345 }
6346
6347 /*
6348 * Set the dirty / access flags.
6349 * ASSUMES this is set when the address is translated rather than on commit...
6350 */
6351 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6352 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6353 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6354 {
6355 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6356 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6357 AssertRC(rc2);
6358 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6359 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6360 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6361 }
6362
6363 /*
6364 * Look up the physical page info if necessary.
6365 */
6366 uint8_t *pbMem = NULL;
6367 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6368# ifdef IN_RING3
6369 pbMem = pTlbe->pbMappingR3;
6370# else
6371 pbMem = NULL;
6372# endif
6373 else
6374 {
6375 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6376 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6377 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6378 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6379 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6380 { /* likely */ }
6381 else
6382 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6383 pTlbe->pbMappingR3 = NULL;
6384 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6385 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6386 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6387 &pbMem, &pTlbe->fFlagsAndPhysRev);
6388 AssertRCReturn(rc, rc);
6389# ifdef IN_RING3
6390 pTlbe->pbMappingR3 = pbMem;
6391# endif
6392 }
6393
6394 /*
6395 * Check the physical page level access and mapping.
6396 */
6397 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6398 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6399 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6400 { /* probably likely */ }
6401 else
6402 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6403 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6404 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6405 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6406 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6407 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6408
6409 if (pbMem)
6410 {
6411 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6412 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6413 fAccess |= IEM_ACCESS_NOT_LOCKED;
6414 }
6415 else
6416 {
6417 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6418 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6419 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6420 if (rcStrict != VINF_SUCCESS)
6421 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6422 }
6423
6424 void * const pvMem = pbMem;
6425
6426 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6427 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6428 if (fAccess & IEM_ACCESS_TYPE_READ)
6429 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6430
6431#else /* !IEM_WITH_DATA_TLB */
6432
6433 RTGCPHYS GCPhysFirst;
6434 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6435 if (rcStrict != VINF_SUCCESS)
6436 return rcStrict;
6437
6438 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6439 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6440 if (fAccess & IEM_ACCESS_TYPE_READ)
6441 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6442
6443 void *pvMem;
6444 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6445 if (rcStrict != VINF_SUCCESS)
6446 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6447
6448#endif /* !IEM_WITH_DATA_TLB */
6449
6450 /*
6451 * Fill in the mapping table entry.
6452 */
6453 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6454 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6455 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6456 pVCpu->iem.s.cActiveMappings += 1;
6457
6458 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6459 *ppvMem = pvMem;
6460
6461 return VINF_SUCCESS;
6462}
6463
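/*
 * Minimal usage sketch (compiled out; the helper and its fBitsToSet parameter
 * are made up for illustration): the typical read-modify-write pairing of
 * iemMemMap with iemMemCommitAndUnmap below, using the natural alignment mask
 * for uAlignCtl.  IEM_ACCESS_DATA_RW is assumed to be the read+write data
 * access constant used by instruction implementations elsewhere in IEM.
 */
#if 0 /* illustrative sketch only */
static VBOXSTRICTRC iemExampleOrU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fBitsToSet)
{
    uint32_t    *pu32Dst;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
                                      IEM_ACCESS_DATA_RW, sizeof(*pu32Dst) - 1 /* natural alignment mask */);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu32Dst |= fBitsToSet;     /* operate on the mapped (possibly bounce buffered) bytes */
        rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_RW);
    }
    return rcStrict;                /* any #GP/#SS/#PF/#AC raised is reflected in the status */
}
#endif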
6464
6465/**
6466 * Commits the guest memory if bounce buffered and unmaps it.
6467 *
6468 * @returns Strict VBox status code.
6469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6470 * @param pvMem The mapping.
6471 * @param fAccess The kind of access.
6472 */
6473VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6474{
6475 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6476 AssertReturn(iMemMap >= 0, iMemMap);
6477
6478 /* If it's bounce buffered, we may need to write back the buffer. */
6479 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6480 {
6481 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6482 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6483 }
6484 /* Otherwise unlock it. */
6485 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6486 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6487
6488 /* Free the entry. */
6489 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6490 Assert(pVCpu->iem.s.cActiveMappings != 0);
6491 pVCpu->iem.s.cActiveMappings--;
6492 return VINF_SUCCESS;
6493}
6494
6495#ifdef IEM_WITH_SETJMP
6496
6497/**
6498 * Maps the specified guest memory for the given kind of access, longjmp on
6499 * error.
6500 *
6501 * This may be using bounce buffering of the memory if it's crossing a page
6502 * boundary or if there is an access handler installed for any of it. Because
6503 * of lock prefix guarantees, we're in for some extra clutter when this
6504 * happens.
6505 *
6506 * This may raise a \#GP, \#SS, \#PF or \#AC.
6507 *
6508 * @returns Pointer to the mapped memory.
6509 *
6510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6511 * @param cbMem The number of bytes to map. This is usually 1,
6512 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6513 * string operations it can be up to a page.
6514 * @param iSegReg The index of the segment register to use for
6515 * this access. The base and limits are checked.
6516 * Use UINT8_MAX to indicate that no segmentation
6517 * is required (for IDT, GDT and LDT accesses).
6518 * @param GCPtrMem The address of the guest memory.
6519 * @param fAccess How the memory is being accessed. The
6520 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6521 * how to map the memory, while the
6522 * IEM_ACCESS_WHAT_XXX bit is used when raising
6523 * exceptions.
6524 * @param uAlignCtl Alignment control:
6525 * - Bits 15:0 is the alignment mask.
6526 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6527 * IEM_MEMMAP_F_ALIGN_SSE, and
6528 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6529 * Pass zero to skip alignment.
6530 */
6531void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6532 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6533{
6534 /*
6535 * Check the input, check segment access and adjust address
6536 * with segment base.
6537 */
6538 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6539    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6540 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6541
6542 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6543 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6544 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6545
6546 /*
6547 * Alignment check.
6548 */
6549 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6550 { /* likelyish */ }
6551 else
6552 {
6553 /* Misaligned access. */
6554 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6555 {
6556 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6557 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6558 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6559 {
6560 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6561
6562 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6563 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6564 }
6565 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6566 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6567 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6568 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6569 * that's what FXSAVE does on a 10980xe. */
6570 && iemMemAreAlignmentChecksEnabled(pVCpu))
6571 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6572 else
6573 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6574 }
6575 }
6576
6577 /*
6578 * Figure out which mapping entry to use.
6579 */
6580 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6581 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6582 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6583 {
6584 iMemMap = iemMemMapFindFree(pVCpu);
6585 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6586 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6587 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6588 pVCpu->iem.s.aMemMappings[2].fAccess),
6589 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6590 }
6591
6592 /*
6593 * Crossing a page boundary?
6594 */
6595 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6596 { /* No (likely). */ }
6597 else
6598 {
6599 void *pvMem;
6600 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6601 if (rcStrict == VINF_SUCCESS)
6602 return pvMem;
6603 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6604 }
6605
6606#ifdef IEM_WITH_DATA_TLB
6607 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6608
6609 /*
6610 * Get the TLB entry for this page.
6611 */
6612 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6613 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6614 if (pTlbe->uTag == uTag)
6615 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6616 else
6617 {
6618 pVCpu->iem.s.DataTlb.cTlbMisses++;
6619 PGMPTWALK Walk;
6620 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6621 if (RT_FAILURE(rc))
6622 {
6623 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6624# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6625 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6626 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6627# endif
6628 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6629 }
6630
6631 Assert(Walk.fSucceeded);
6632 pTlbe->uTag = uTag;
6633 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6634 pTlbe->GCPhys = Walk.GCPhys;
6635 pTlbe->pbMappingR3 = NULL;
6636 }
6637
6638 /*
6639 * Check the flags and physical revision.
6640 */
6641 /** @todo make the caller pass these in with fAccess. */
6642 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6643 ? IEMTLBE_F_PT_NO_USER : 0;
6644 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6645 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6646 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6647 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6648 ? IEMTLBE_F_PT_NO_WRITE : 0)
6649 : 0;
6650 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6651 uint8_t *pbMem = NULL;
6652 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6653 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6654# ifdef IN_RING3
6655 pbMem = pTlbe->pbMappingR3;
6656# else
6657 pbMem = NULL;
6658# endif
6659 else
6660 {
6661 /*
6662 * Okay, something isn't quite right or needs refreshing.
6663 */
6664 /* Write to read only memory? */
6665 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6666 {
6667 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6668# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6669 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6670 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6671# endif
6672 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6673 }
6674
6675 /* Kernel memory accessed by userland? */
6676 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6677 {
6678 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6679# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6680 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6681 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6682# endif
6683 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6684 }
6685
6686 /* Set the dirty / access flags.
6687 ASSUMES this is set when the address is translated rather than on commit... */
6688 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6689 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6690 {
6691 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6692 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6693 AssertRC(rc2);
6694 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6695 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6696 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6697 }
6698
6699 /*
6700 * Check if the physical page info needs updating.
6701 */
6702 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6703# ifdef IN_RING3
6704 pbMem = pTlbe->pbMappingR3;
6705# else
6706 pbMem = NULL;
6707# endif
6708 else
6709 {
6710 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6711 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6712 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6713 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6714 pTlbe->pbMappingR3 = NULL;
6715 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6716 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6717 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6718 &pbMem, &pTlbe->fFlagsAndPhysRev);
6719 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6720# ifdef IN_RING3
6721 pTlbe->pbMappingR3 = pbMem;
6722# endif
6723 }
6724
6725 /*
6726 * Check the physical page level access and mapping.
6727 */
6728 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6729 { /* probably likely */ }
6730 else
6731 {
6732 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6733 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6734 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6735 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6736 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6737 if (rcStrict == VINF_SUCCESS)
6738 return pbMem;
6739 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6740 }
6741 }
6742 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6743
6744 if (pbMem)
6745 {
6746 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6747 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6748 fAccess |= IEM_ACCESS_NOT_LOCKED;
6749 }
6750 else
6751 {
6752 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6753 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6754 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6755 if (rcStrict == VINF_SUCCESS)
6756 return pbMem;
6757 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6758 }
6759
6760 void * const pvMem = pbMem;
6761
6762 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6763 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6764 if (fAccess & IEM_ACCESS_TYPE_READ)
6765 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6766
6767#else /* !IEM_WITH_DATA_TLB */
6768
6769
6770 RTGCPHYS GCPhysFirst;
6771 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6772 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6773 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6774
6775 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6776 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6777 if (fAccess & IEM_ACCESS_TYPE_READ)
6778 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6779
6780 void *pvMem;
6781 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6782 if (rcStrict == VINF_SUCCESS)
6783 { /* likely */ }
6784 else
6785 {
6786 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6787 if (rcStrict == VINF_SUCCESS)
6788 return pvMem;
6789 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6790 }
6791
6792#endif /* !IEM_WITH_DATA_TLB */
6793
6794 /*
6795 * Fill in the mapping table entry.
6796 */
6797 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6798 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6799 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6800 pVCpu->iem.s.cActiveMappings++;
6801
6802 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6803 return pvMem;
6804}
6805
6806
6807/**
6808 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6809 *
6810 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6811 * @param pvMem The mapping.
6812 * @param fAccess The kind of access.
6813 */
6814void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
6815{
6816 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6817 AssertStmt(iMemMap >= 0, IEM_DO_LONGJMP(pVCpu, iMemMap));
6818
6819 /* If it's bounce buffered, we may need to write back the buffer. */
6820 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6821 {
6822 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6823 {
6824 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6825 if (rcStrict == VINF_SUCCESS)
6826 return;
6827 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6828 }
6829 }
6830 /* Otherwise unlock it. */
6831 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6832 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6833
6834 /* Free the entry. */
6835 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6836 Assert(pVCpu->iem.s.cActiveMappings != 0);
6837 pVCpu->iem.s.cActiveMappings--;
6838}
6839
6840#endif /* IEM_WITH_SETJMP */
6841
6842#ifndef IN_RING3
6843/**
6844 * Commits the guest memory if bounce buffered and unmaps it, if any bounce
6845 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).
6846 *
6847 * Allows the instruction to be completed and retired, while the IEM user will
6848 * return to ring-3 immediately afterwards and do the postponed writes there.
6849 *
6850 * @returns VBox status code (no strict statuses). Caller must check
6851 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6852 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6853 * @param pvMem The mapping.
6854 * @param fAccess The kind of access.
6855 */
6856VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6857{
6858 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6859 AssertReturn(iMemMap >= 0, iMemMap);
6860
6861 /* If it's bounce buffered, we may need to write back the buffer. */
6862 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6863 {
6864 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6865 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6866 }
6867 /* Otherwise unlock it. */
6868 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6869 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6870
6871 /* Free the entry. */
6872 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6873 Assert(pVCpu->iem.s.cActiveMappings != 0);
6874 pVCpu->iem.s.cActiveMappings--;
6875 return VINF_SUCCESS;
6876}
6877#endif
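/*
 * Caller-side note (illustrative; the exact way the caller gets back to
 * ring-3 is up to it): after the postponing variant above has flagged a
 * troublesome commit, the actual write only happens once the EMT is back in
 * ring-3, so loops like string instructions must check the force flag first:
 *
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          break;   (stop repeating and let execution return to ring-3)
 */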
6878
6879
6880/**
6881 * Rolls back mappings, releasing page locks and such.
6882 *
6883 * The caller shall only call this after checking cActiveMappings.
6884 *
6885 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6886 */
6887void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6888{
6889 Assert(pVCpu->iem.s.cActiveMappings > 0);
6890
6891 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6892 while (iMemMap-- > 0)
6893 {
6894 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6895 if (fAccess != IEM_ACCESS_INVALID)
6896 {
6897 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6898 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6899 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6900 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6901 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6902 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6903 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6904 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6905 pVCpu->iem.s.cActiveMappings--;
6906 }
6907 }
6908}
6909
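/*
 * Illustrative caller pattern (the executor helper named here is hypothetical):
 * per the remark above, iemMemRollback is only invoked after checking that an
 * instruction bailed out with mappings still active.
 */
#if 0 /* illustrative sketch only */
static void iemExampleRollbackOnFailure(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    if (   rcStrict != VINF_SUCCESS
        && pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);   /* releases any page mapping locks and invalidates the entries */
}
#endif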
6910
6911/**
6912 * Fetches a data byte.
6913 *
6914 * @returns Strict VBox status code.
6915 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6916 * @param pu8Dst Where to return the byte.
6917 * @param iSegReg The index of the segment register to use for
6918 * this access. The base and limits are checked.
6919 * @param GCPtrMem The address of the guest memory.
6920 */
6921VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6922{
6923 /* The lazy approach for now... */
6924 uint8_t const *pu8Src;
6925 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6926 if (rc == VINF_SUCCESS)
6927 {
6928 *pu8Dst = *pu8Src;
6929 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6930 }
6931 return rc;
6932}
6933
6934
6935#ifdef IEM_WITH_SETJMP
6936/**
6937 * Fetches a data byte, longjmp on error.
6938 *
6939 * @returns The byte.
6940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6941 * @param iSegReg The index of the segment register to use for
6942 * this access. The base and limits are checked.
6943 * @param GCPtrMem The address of the guest memory.
6944 */
6945uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6946{
6947 /* The lazy approach for now... */
6948 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6949 uint8_t const bRet = *pu8Src;
6950 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6951 return bRet;
6952}
6953#endif /* IEM_WITH_SETJMP */
6954
6955
6956/**
6957 * Fetches a data word.
6958 *
6959 * @returns Strict VBox status code.
6960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6961 * @param pu16Dst Where to return the word.
6962 * @param iSegReg The index of the segment register to use for
6963 * this access. The base and limits are checked.
6964 * @param GCPtrMem The address of the guest memory.
6965 */
6966VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6967{
6968 /* The lazy approach for now... */
6969 uint16_t const *pu16Src;
6970 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
6971 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
6972 if (rc == VINF_SUCCESS)
6973 {
6974 *pu16Dst = *pu16Src;
6975 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6976 }
6977 return rc;
6978}
6979
6980
6981#ifdef IEM_WITH_SETJMP
6982/**
6983 * Fetches a data word, longjmp on error.
6984 *
6985 * @returns The word
6986 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6987 * @param iSegReg The index of the segment register to use for
6988 * this access. The base and limits are checked.
6989 * @param GCPtrMem The address of the guest memory.
6990 */
6991uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6992{
6993 /* The lazy approach for now... */
6994 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6995 sizeof(*pu16Src) - 1);
6996 uint16_t const u16Ret = *pu16Src;
6997 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6998 return u16Ret;
6999}
7000#endif
7001
7002
7003/**
7004 * Fetches a data dword.
7005 *
7006 * @returns Strict VBox status code.
7007 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7008 * @param pu32Dst Where to return the dword.
7009 * @param iSegReg The index of the segment register to use for
7010 * this access. The base and limits are checked.
7011 * @param GCPtrMem The address of the guest memory.
7012 */
7013VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7014{
7015 /* The lazy approach for now... */
7016 uint32_t const *pu32Src;
7017 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
7018 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7019 if (rc == VINF_SUCCESS)
7020 {
7021 *pu32Dst = *pu32Src;
7022 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7023 }
7024 return rc;
7025}
7026
7027
7028/**
7029 * Fetches a data dword and zero extends it to a qword.
7030 *
7031 * @returns Strict VBox status code.
7032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7033 * @param pu64Dst Where to return the qword.
7034 * @param iSegReg The index of the segment register to use for
7035 * this access. The base and limits are checked.
7036 * @param GCPtrMem The address of the guest memory.
7037 */
7038VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7039{
7040 /* The lazy approach for now... */
7041 uint32_t const *pu32Src;
7042 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
7043 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7044 if (rc == VINF_SUCCESS)
7045 {
7046 *pu64Dst = *pu32Src;
7047 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7048 }
7049 return rc;
7050}
7051
7052
7053#ifdef IEM_WITH_SETJMP
7054
7055/**
7056 * Fetches a data dword, longjmp on error, fallback/safe version.
7057 *
7058 * @returns The dword
7059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7060 * @param iSegReg The index of the segment register to use for
7061 * this access. The base and limits are checked.
7062 * @param GCPtrMem The address of the guest memory.
7063 */
7064uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7065{
7066 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7067 sizeof(*pu32Src) - 1);
7068 uint32_t const u32Ret = *pu32Src;
7069 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7070 return u32Ret;
7071}
7072
7073
7074/**
7075 * Fetches a data dword, longjmp on error.
7076 *
7077 * @returns The dword
7078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7079 * @param iSegReg The index of the segment register to use for
7080 * this access. The base and limits are checked.
7081 * @param GCPtrMem The address of the guest memory.
7082 */
7083uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7084{
7085# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
7086 /*
7087     * Convert from segmented to flat address and check that it doesn't cross a page boundary.
7088 */
7089 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
7090 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
7091 {
7092 /*
7093 * TLB lookup.
7094 */
7095 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
7096 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
7097 if (pTlbe->uTag == uTag)
7098 {
7099 /*
7100 * Check TLB page table level access flags.
7101 */
7102 uint64_t const fNoUser = IEM_GET_CPL(pVCpu) == 3 ? IEMTLBE_F_PT_NO_USER : 0;
7103 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
7104 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
7105 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7106 {
7107 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
7108
7109 /*
7110 * Alignment check:
7111 */
7112 /** @todo check priority \#AC vs \#PF */
7113 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
7114 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7115 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
7116 || IEM_GET_CPL(pVCpu) != 3)
7117 {
7118 /*
7119 * Fetch and return the dword
7120 */
7121 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
7122 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
7123 return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
7124 }
7125 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
7126 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7127 }
7128 }
7129 }
7130
7131    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
7132 outdated page pointer, or other troubles. */
7133 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
7134 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
7135
7136# else
7137 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
7138 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7139 uint32_t const u32Ret = *pu32Src;
7140 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7141 return u32Ret;
7142# endif
7143}
7144#endif
7145
7146
7147#ifdef SOME_UNUSED_FUNCTION
7148/**
7149 * Fetches a data dword and sign extends it to a qword.
7150 *
7151 * @returns Strict VBox status code.
7152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7153 * @param pu64Dst Where to return the sign extended value.
7154 * @param iSegReg The index of the segment register to use for
7155 * this access. The base and limits are checked.
7156 * @param GCPtrMem The address of the guest memory.
7157 */
7158VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7159{
7160 /* The lazy approach for now... */
7161 int32_t const *pi32Src;
7162 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
7163 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7164 if (rc == VINF_SUCCESS)
7165 {
7166 *pu64Dst = *pi32Src;
7167 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7168 }
7169#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7170 else
7171 *pu64Dst = 0;
7172#endif
7173 return rc;
7174}
7175#endif
7176
7177
7178/**
7179 * Fetches a data qword.
7180 *
7181 * @returns Strict VBox status code.
7182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7183 * @param pu64Dst Where to return the qword.
7184 * @param iSegReg The index of the segment register to use for
7185 * this access. The base and limits are checked.
7186 * @param GCPtrMem The address of the guest memory.
7187 */
7188VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7189{
7190 /* The lazy approach for now... */
7191 uint64_t const *pu64Src;
7192 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7193 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7194 if (rc == VINF_SUCCESS)
7195 {
7196 *pu64Dst = *pu64Src;
7197 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7198 }
7199 return rc;
7200}
7201
7202
7203#ifdef IEM_WITH_SETJMP
7204/**
7205 * Fetches a data qword, longjmp on error.
7206 *
7207 * @returns The qword.
7208 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7209 * @param iSegReg The index of the segment register to use for
7210 * this access. The base and limits are checked.
7211 * @param GCPtrMem The address of the guest memory.
7212 */
7213uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7214{
7215 /* The lazy approach for now... */
7216 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
7217 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7218 uint64_t const u64Ret = *pu64Src;
7219 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7220 return u64Ret;
7221}
7222#endif
7223
7224
7225/**
7226 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7227 *
7228 * @returns Strict VBox status code.
7229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7230 * @param pu64Dst Where to return the qword.
7231 * @param iSegReg The index of the segment register to use for
7232 * this access. The base and limits are checked.
7233 * @param GCPtrMem The address of the guest memory.
7234 */
7235VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7236{
7237 /* The lazy approach for now... */
7238 uint64_t const *pu64Src;
7239 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7240 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7241 if (rc == VINF_SUCCESS)
7242 {
7243 *pu64Dst = *pu64Src;
7244 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7245 }
7246 return rc;
7247}
7248
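/*
 * Worked example of the alignment control used above (illustrative only):
 * the mask 15 demands 16 byte alignment, so e.g. GCPtrMem = 0x1008 fails the
 * check.  With IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE this raises
 * #GP(0) unless MXCSR.MM is set; in that case #AC is raised instead, and only
 * when alignment checking is enabled (CR0.AM, EFLAGS.AC set and CPL 3), as
 * implemented in the misalignment handling of iemMemMap/iemMemMapJmp.
 */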
7249
7250#ifdef IEM_WITH_SETJMP
7251/**
7252 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
7253 *
7254 * @returns The qword.
7255 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7256 * @param iSegReg The index of the segment register to use for
7257 * this access. The base and limits are checked.
7258 * @param GCPtrMem The address of the guest memory.
7259 */
7260uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7261{
7262 /* The lazy approach for now... */
7263 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7264 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7265 uint64_t const u64Ret = *pu64Src;
7266 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7267 return u64Ret;
7268}
7269#endif
7270
7271
7272/**
7273 * Fetches a data tword.
7274 *
7275 * @returns Strict VBox status code.
7276 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7277 * @param pr80Dst Where to return the tword.
7278 * @param iSegReg The index of the segment register to use for
7279 * this access. The base and limits are checked.
7280 * @param GCPtrMem The address of the guest memory.
7281 */
7282VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7283{
7284 /* The lazy approach for now... */
7285 PCRTFLOAT80U pr80Src;
7286 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7287 if (rc == VINF_SUCCESS)
7288 {
7289 *pr80Dst = *pr80Src;
7290 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7291 }
7292 return rc;
7293}
7294
7295
7296#ifdef IEM_WITH_SETJMP
7297/**
7298 * Fetches a data tword, longjmp on error.
7299 *
7300 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7301 * @param pr80Dst Where to return the tword.
7302 * @param iSegReg The index of the segment register to use for
7303 * this access. The base and limits are checked.
7304 * @param GCPtrMem The address of the guest memory.
7305 */
7306void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7307{
7308 /* The lazy approach for now... */
7309 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7310 *pr80Dst = *pr80Src;
7311 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7312}
7313#endif
7314
7315
7316/**
7317 * Fetches a data decimal tword.
7318 *
7319 * @returns Strict VBox status code.
7320 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7321 * @param pd80Dst Where to return the tword.
7322 * @param iSegReg The index of the segment register to use for
7323 * this access. The base and limits are checked.
7324 * @param GCPtrMem The address of the guest memory.
7325 */
7326VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7327{
7328 /* The lazy approach for now... */
7329 PCRTPBCD80U pd80Src;
7330 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7331 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7332 if (rc == VINF_SUCCESS)
7333 {
7334 *pd80Dst = *pd80Src;
7335 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7336 }
7337 return rc;
7338}
7339
7340
7341#ifdef IEM_WITH_SETJMP
7342/**
7343 * Fetches a data decimal tword, longjmp on error.
7344 *
7345 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7346 * @param pd80Dst Where to return the tword.
7347 * @param iSegReg The index of the segment register to use for
7348 * this access. The base and limits are checked.
7349 * @param GCPtrMem The address of the guest memory.
7350 */
7351void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7352{
7353 /* The lazy approach for now... */
7354 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7355 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7356 *pd80Dst = *pd80Src;
7357 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7358}
7359#endif
7360
7361
7362/**
7363 * Fetches a data dqword (double qword), generally SSE related.
7364 *
7365 * @returns Strict VBox status code.
7366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7367 * @param pu128Dst Where to return the dqword.
7368 * @param iSegReg The index of the segment register to use for
7369 * this access. The base and limits are checked.
7370 * @param GCPtrMem The address of the guest memory.
7371 */
7372VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7373{
7374 /* The lazy approach for now... */
7375 PCRTUINT128U pu128Src;
7376 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7377 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7378 if (rc == VINF_SUCCESS)
7379 {
7380 pu128Dst->au64[0] = pu128Src->au64[0];
7381 pu128Dst->au64[1] = pu128Src->au64[1];
7382 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7383 }
7384 return rc;
7385}
7386
7387
7388#ifdef IEM_WITH_SETJMP
7389/**
7390 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
7391 *
7392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7393 * @param pu128Dst Where to return the dqword.
7394 * @param iSegReg The index of the segment register to use for
7395 * this access. The base and limits are checked.
7396 * @param GCPtrMem The address of the guest memory.
7397 */
7398void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7399{
7400 /* The lazy approach for now... */
7401 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7402 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7403 pu128Dst->au64[0] = pu128Src->au64[0];
7404 pu128Dst->au64[1] = pu128Src->au64[1];
7405 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7406}
7407#endif
7408
7409
7410/**
7411 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7412 * related.
7413 *
7414 * Raises \#GP(0) if not aligned.
7415 *
7416 * @returns Strict VBox status code.
7417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7418 * @param pu128Dst Where to return the dqword.
7419 * @param iSegReg The index of the segment register to use for
7420 * this access. The base and limits are checked.
7421 * @param GCPtrMem The address of the guest memory.
7422 */
7423VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7424{
7425 /* The lazy approach for now... */
7426 PCRTUINT128U pu128Src;
7427 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7428 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7429 if (rc == VINF_SUCCESS)
7430 {
7431 pu128Dst->au64[0] = pu128Src->au64[0];
7432 pu128Dst->au64[1] = pu128Src->au64[1];
7433 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7434 }
7435 return rc;
7436}
7437
7438
7439#ifdef IEM_WITH_SETJMP
7440/**
7441 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7442 * related, longjmp on error.
7443 *
7444 * Raises \#GP(0) if not aligned.
7445 *
7446 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7447 * @param pu128Dst Where to return the dqword.
7448 * @param iSegReg The index of the segment register to use for
7449 * this access. The base and limits are checked.
7450 * @param GCPtrMem The address of the guest memory.
7451 */
7452void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7453 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7454{
7455 /* The lazy approach for now... */
7456 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7457 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7458 pu128Dst->au64[0] = pu128Src->au64[0];
7459 pu128Dst->au64[1] = pu128Src->au64[1];
7460 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7461}
7462#endif
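
/*
 * A note on the last argument the aligned fetchers above hand to iemMemMap /
 * iemMemMapJmp.  This is a sketch based purely on how the callers in this
 * file compose the value, not on the mapping code itself: the low bits act as
 * an alignment mask, and the IEM_MEMMAP_F_* bits say how a violation is
 * treated.  For the 16 byte SSE case that is:
 *
 *      (sizeof(RTUINT128U) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE
 *          = 0x0f | "raise #GP(0)" | "apply SSE alignment rules"
 *
 * while the unaligned variants pass 0 (the NO_AC variant) and the scalar
 * helpers pass sizeof(*puDst) - 1 for natural alignment.
 */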
7463
7464
7465/**
7466 * Fetches a data oword (octo word), generally AVX related.
7467 *
7468 * @returns Strict VBox status code.
7469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7470 * @param pu256Dst Where to return the oword.
7471 * @param iSegReg The index of the segment register to use for
7472 * this access. The base and limits are checked.
7473 * @param GCPtrMem The address of the guest memory.
7474 */
7475VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7476{
7477 /* The lazy approach for now... */
7478 PCRTUINT256U pu256Src;
7479 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7480 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7481 if (rc == VINF_SUCCESS)
7482 {
7483 pu256Dst->au64[0] = pu256Src->au64[0];
7484 pu256Dst->au64[1] = pu256Src->au64[1];
7485 pu256Dst->au64[2] = pu256Src->au64[2];
7486 pu256Dst->au64[3] = pu256Src->au64[3];
7487 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7488 }
7489 return rc;
7490}
7491
7492
7493#ifdef IEM_WITH_SETJMP
7494/**
7495 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
7496 *
7497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7498 * @param pu256Dst Where to return the oword.
7499 * @param iSegReg The index of the segment register to use for
7500 * this access. The base and limits are checked.
7501 * @param GCPtrMem The address of the guest memory.
7502 */
7503void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7504{
7505 /* The lazy approach for now... */
7506 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7507 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7508 pu256Dst->au64[0] = pu256Src->au64[0];
7509 pu256Dst->au64[1] = pu256Src->au64[1];
7510 pu256Dst->au64[2] = pu256Src->au64[2];
7511 pu256Dst->au64[3] = pu256Src->au64[3];
7512 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7513}
7514#endif
7515
7516
7517/**
7518 * Fetches a data oword (octo word) at an aligned address, generally AVX
7519 * related.
7520 *
7521 * Raises \#GP(0) if not aligned.
7522 *
7523 * @returns Strict VBox status code.
7524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7525 * @param pu256Dst Where to return the oword.
7526 * @param iSegReg The index of the segment register to use for
7527 * this access. The base and limits are checked.
7528 * @param GCPtrMem The address of the guest memory.
7529 */
7530VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7531{
7532 /* The lazy approach for now... */
7533 PCRTUINT256U pu256Src;
7534 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7535 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7536 if (rc == VINF_SUCCESS)
7537 {
7538 pu256Dst->au64[0] = pu256Src->au64[0];
7539 pu256Dst->au64[1] = pu256Src->au64[1];
7540 pu256Dst->au64[2] = pu256Src->au64[2];
7541 pu256Dst->au64[3] = pu256Src->au64[3];
7542 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7543 }
7544 return rc;
7545}
7546
7547
7548#ifdef IEM_WITH_SETJMP
7549/**
7550 * Fetches a data oword (octo word) at an aligned address, generally AVX
7551 * related, longjmp on error.
7552 *
7553 * Raises \#GP(0) if not aligned.
7554 *
7555 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7556 * @param pu256Dst Where to return the oword.
7557 * @param iSegReg The index of the segment register to use for
7558 * this access. The base and limits are checked.
7559 * @param GCPtrMem The address of the guest memory.
7560 */
7561void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7562 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7563{
7564 /* The lazy approach for now... */
7565 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7566 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7567 pu256Dst->au64[0] = pu256Src->au64[0];
7568 pu256Dst->au64[1] = pu256Src->au64[1];
7569 pu256Dst->au64[2] = pu256Src->au64[2];
7570 pu256Dst->au64[3] = pu256Src->au64[3];
7571 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7572}
7573#endif
7574
7575
7576
7577/**
7578 * Fetches a descriptor register (lgdt, lidt).
7579 *
7580 * @returns Strict VBox status code.
7581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7582 * @param pcbLimit Where to return the limit.
7583 * @param pGCPtrBase Where to return the base.
7584 * @param iSegReg The index of the segment register to use for
7585 * this access. The base and limits are checked.
7586 * @param GCPtrMem The address of the guest memory.
7587 * @param enmOpSize The effective operand size.
7588 */
7589VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7590 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7591{
7592 /*
7593 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7594 * little special:
7595 * - The two reads are done separately.
7596 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7597 * - We suspect the 386 to actually commit the limit before the base in
7598 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7599 * don't try to emulate this eccentric behavior, because it's not well
7600 * enough understood and rather hard to trigger.
7601 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7602 */
7603 VBOXSTRICTRC rcStrict;
7604 if (IEM_IS_64BIT_CODE(pVCpu))
7605 {
7606 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7607 if (rcStrict == VINF_SUCCESS)
7608 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7609 }
7610 else
7611 {
7612        uint32_t uTmp = 0; /* (Initialized to quiet a Visual C++ maybe-used-uninitialized warning.) */
7613 if (enmOpSize == IEMMODE_32BIT)
7614 {
7615 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7616 {
7617 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7618 if (rcStrict == VINF_SUCCESS)
7619 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7620 }
7621 else
7622 {
7623 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7624 if (rcStrict == VINF_SUCCESS)
7625 {
7626 *pcbLimit = (uint16_t)uTmp;
7627 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7628 }
7629 }
7630 if (rcStrict == VINF_SUCCESS)
7631 *pGCPtrBase = uTmp;
7632 }
7633 else
7634 {
7635 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7636 if (rcStrict == VINF_SUCCESS)
7637 {
7638 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7639 if (rcStrict == VINF_SUCCESS)
7640 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7641 }
7642 }
7643 }
7644 return rcStrict;
7645}
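
/*
 * For reference, the memory operand layout implied by the fetcher above (a
 * summary of what the code does, handy when reading the LGDT/LIDT callers):
 *
 *      offset 0:  uint16_t  limit
 *      offset 2:  base      - 64 bits in 64-bit code,
 *                           - 32 bits for a 32-bit operand size,
 *                           - 24 bits (high byte ignored) for a 16-bit
 *                             operand size.
 */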
7646
7647
7648
7649/**
7650 * Stores a data byte.
7651 *
7652 * @returns Strict VBox status code.
7653 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7654 * @param iSegReg The index of the segment register to use for
7655 * this access. The base and limits are checked.
7656 * @param GCPtrMem The address of the guest memory.
7657 * @param u8Value The value to store.
7658 */
7659VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7660{
7661 /* The lazy approach for now... */
7662 uint8_t *pu8Dst;
7663 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7664 if (rc == VINF_SUCCESS)
7665 {
7666 *pu8Dst = u8Value;
7667 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7668 }
7669 return rc;
7670}
7671
7672
7673#ifdef IEM_WITH_SETJMP
7674/**
7675 * Stores a data byte, longjmp on error.
7676 *
7677 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7678 * @param iSegReg The index of the segment register to use for
7679 * this access. The base and limits are checked.
7680 * @param GCPtrMem The address of the guest memory.
7681 * @param u8Value The value to store.
7682 */
7683void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP
7684{
7685 /* The lazy approach for now... */
7686 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7687 *pu8Dst = u8Value;
7688 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7689}
7690#endif
7691
7692
7693/**
7694 * Stores a data word.
7695 *
7696 * @returns Strict VBox status code.
7697 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7698 * @param iSegReg The index of the segment register to use for
7699 * this access. The base and limits are checked.
7700 * @param GCPtrMem The address of the guest memory.
7701 * @param u16Value The value to store.
7702 */
7703VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7704{
7705 /* The lazy approach for now... */
7706 uint16_t *pu16Dst;
7707 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7708 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7709 if (rc == VINF_SUCCESS)
7710 {
7711 *pu16Dst = u16Value;
7712 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7713 }
7714 return rc;
7715}
7716
7717
7718#ifdef IEM_WITH_SETJMP
7719/**
7720 * Stores a data word, longjmp on error.
7721 *
7722 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7723 * @param iSegReg The index of the segment register to use for
7724 * this access. The base and limits are checked.
7725 * @param GCPtrMem The address of the guest memory.
7726 * @param u16Value The value to store.
7727 */
7728void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP
7729{
7730 /* The lazy approach for now... */
7731 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7732 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7733 *pu16Dst = u16Value;
7734 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7735}
7736#endif
7737
7738
7739/**
7740 * Stores a data dword.
7741 *
7742 * @returns Strict VBox status code.
7743 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7744 * @param iSegReg The index of the segment register to use for
7745 * this access. The base and limits are checked.
7746 * @param GCPtrMem The address of the guest memory.
7747 * @param u32Value The value to store.
7748 */
7749VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7750{
7751 /* The lazy approach for now... */
7752 uint32_t *pu32Dst;
7753 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7754 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7755 if (rc == VINF_SUCCESS)
7756 {
7757 *pu32Dst = u32Value;
7758 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7759 }
7760 return rc;
7761}
7762
7763
7764#ifdef IEM_WITH_SETJMP
7765/**
7766 * Stores a data dword, longjmp on error.
7767 *
7769 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7770 * @param iSegReg The index of the segment register to use for
7771 * this access. The base and limits are checked.
7772 * @param GCPtrMem The address of the guest memory.
7773 * @param u32Value The value to store.
7774 */
7775void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP
7776{
7777 /* The lazy approach for now... */
7778 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7779 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7780 *pu32Dst = u32Value;
7781 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7782}
7783#endif
7784
7785
7786/**
7787 * Stores a data qword.
7788 *
7789 * @returns Strict VBox status code.
7790 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7791 * @param iSegReg The index of the segment register to use for
7792 * this access. The base and limits are checked.
7793 * @param GCPtrMem The address of the guest memory.
7794 * @param u64Value The value to store.
7795 */
7796VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7797{
7798 /* The lazy approach for now... */
7799 uint64_t *pu64Dst;
7800 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7801 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7802 if (rc == VINF_SUCCESS)
7803 {
7804 *pu64Dst = u64Value;
7805 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7806 }
7807 return rc;
7808}
7809
7810
7811#ifdef IEM_WITH_SETJMP
7812/**
7813 * Stores a data qword, longjmp on error.
7814 *
7815 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7816 * @param iSegReg The index of the segment register to use for
7817 * this access. The base and limits are checked.
7818 * @param GCPtrMem The address of the guest memory.
7819 * @param u64Value The value to store.
7820 */
7821void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP
7822{
7823 /* The lazy approach for now... */
7824 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7825 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7826 *pu64Dst = u64Value;
7827 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7828}
7829#endif
7830
7831
7832/**
7833 * Stores a data dqword.
7834 *
7835 * @returns Strict VBox status code.
7836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7837 * @param iSegReg The index of the segment register to use for
7838 * this access. The base and limits are checked.
7839 * @param GCPtrMem The address of the guest memory.
7840 * @param u128Value The value to store.
7841 */
7842VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7843{
7844 /* The lazy approach for now... */
7845 PRTUINT128U pu128Dst;
7846 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7847 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7848 if (rc == VINF_SUCCESS)
7849 {
7850 pu128Dst->au64[0] = u128Value.au64[0];
7851 pu128Dst->au64[1] = u128Value.au64[1];
7852 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7853 }
7854 return rc;
7855}
7856
7857
7858#ifdef IEM_WITH_SETJMP
7859/**
7860 * Stores a data dqword, longjmp on error.
7861 *
7862 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7863 * @param iSegReg The index of the segment register to use for
7864 * this access. The base and limits are checked.
7865 * @param GCPtrMem The address of the guest memory.
7866 * @param u128Value The value to store.
7867 */
7868void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7869{
7870 /* The lazy approach for now... */
7871 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7872 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7873 pu128Dst->au64[0] = u128Value.au64[0];
7874 pu128Dst->au64[1] = u128Value.au64[1];
7875 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7876}
7877#endif
7878
7879
7880/**
7881 * Stores a data dqword, SSE aligned.
7882 *
7883 * @returns Strict VBox status code.
7884 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7885 * @param iSegReg The index of the segment register to use for
7886 * this access. The base and limits are checked.
7887 * @param GCPtrMem The address of the guest memory.
7888 * @param u128Value The value to store.
7889 */
7890VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7891{
7892 /* The lazy approach for now... */
7893 PRTUINT128U pu128Dst;
7894 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7895 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7896 if (rc == VINF_SUCCESS)
7897 {
7898 pu128Dst->au64[0] = u128Value.au64[0];
7899 pu128Dst->au64[1] = u128Value.au64[1];
7900 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7901 }
7902 return rc;
7903}
7904
7905
7906#ifdef IEM_WITH_SETJMP
7907/**
7908 * Stores a data dqword, SSE aligned, longjmp on error.
7909 *
7911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7912 * @param iSegReg The index of the segment register to use for
7913 * this access. The base and limits are checked.
7914 * @param GCPtrMem The address of the guest memory.
7915 * @param u128Value The value to store.
7916 */
7917void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7918 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7919{
7920 /* The lazy approach for now... */
7921 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7922 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7923 pu128Dst->au64[0] = u128Value.au64[0];
7924 pu128Dst->au64[1] = u128Value.au64[1];
7925 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7926}
7927#endif
7928
7929
7930/**
7931 * Stores a data oword (octo word), generally AVX related.
7932 *
7933 * @returns Strict VBox status code.
7934 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7935 * @param iSegReg The index of the segment register to use for
7936 * this access. The base and limits are checked.
7937 * @param GCPtrMem The address of the guest memory.
7938 * @param pu256Value Pointer to the value to store.
7939 */
7940VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7941{
7942 /* The lazy approach for now... */
7943 PRTUINT256U pu256Dst;
7944 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7945 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7946 if (rc == VINF_SUCCESS)
7947 {
7948 pu256Dst->au64[0] = pu256Value->au64[0];
7949 pu256Dst->au64[1] = pu256Value->au64[1];
7950 pu256Dst->au64[2] = pu256Value->au64[2];
7951 pu256Dst->au64[3] = pu256Value->au64[3];
7952 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7953 }
7954 return rc;
7955}
7956
7957
7958#ifdef IEM_WITH_SETJMP
7959/**
7960 * Stores a data oword, longjmp on error.
7961 *
7962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7963 * @param iSegReg The index of the segment register to use for
7964 * this access. The base and limits are checked.
7965 * @param GCPtrMem The address of the guest memory.
7966 * @param pu256Value Pointer to the value to store.
7967 */
7968void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7969{
7970 /* The lazy approach for now... */
7971 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7972 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7973 pu256Dst->au64[0] = pu256Value->au64[0];
7974 pu256Dst->au64[1] = pu256Value->au64[1];
7975 pu256Dst->au64[2] = pu256Value->au64[2];
7976 pu256Dst->au64[3] = pu256Value->au64[3];
7977 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7978}
7979#endif
7980
7981
7982/**
7983 * Stores a data oword, AVX \#GP(0) aligned.
7984 *
7985 * @returns Strict VBox status code.
7986 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7987 * @param iSegReg The index of the segment register to use for
7988 * this access. The base and limits are checked.
7989 * @param GCPtrMem The address of the guest memory.
7990 * @param pu256Value Pointer to the value to store.
7991 */
7992VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7993{
7994 /* The lazy approach for now... */
7995 PRTUINT256U pu256Dst;
7996 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7997 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7998 if (rc == VINF_SUCCESS)
7999 {
8000 pu256Dst->au64[0] = pu256Value->au64[0];
8001 pu256Dst->au64[1] = pu256Value->au64[1];
8002 pu256Dst->au64[2] = pu256Value->au64[2];
8003 pu256Dst->au64[3] = pu256Value->au64[3];
8004 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
8005 }
8006 return rc;
8007}
8008
8009
8010#ifdef IEM_WITH_SETJMP
8011/**
8012 * Stores a data oword, AVX \#GP(0) aligned, longjmp on error.
8013 *
8015 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8016 * @param iSegReg The index of the segment register to use for
8017 * this access. The base and limits are checked.
8018 * @param GCPtrMem The address of the guest memory.
8019 * @param pu256Value Pointer to the value to store.
8020 */
8021void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
8022 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
8023{
8024 /* The lazy approach for now... */
8025 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
8026 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
8027 pu256Dst->au64[0] = pu256Value->au64[0];
8028 pu256Dst->au64[1] = pu256Value->au64[1];
8029 pu256Dst->au64[2] = pu256Value->au64[2];
8030 pu256Dst->au64[3] = pu256Value->au64[3];
8031 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
8032}
8033#endif
8034
8035
8036/**
8037 * Stores a descriptor register (sgdt, sidt).
8038 *
8039 * @returns Strict VBox status code.
8040 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8041 * @param cbLimit The limit.
8042 * @param GCPtrBase The base address.
8043 * @param iSegReg The index of the segment register to use for
8044 * this access. The base and limits are checked.
8045 * @param GCPtrMem The address of the guest memory.
8046 */
8047VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8048{
8049 /*
8050 * The SIDT and SGDT instructions actually store the data using two
8051 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
8052 * do not respond to opsize prefixes.
8053 */
8054 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
8055 if (rcStrict == VINF_SUCCESS)
8056 {
8057 if (IEM_IS_16BIT_CODE(pVCpu))
8058 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
8059 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
8060 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
8061 else if (IEM_IS_32BIT_CODE(pVCpu))
8062 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
8063 else
8064 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
8065 }
8066 return rcStrict;
8067}
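
/*
 * The bytes produced by the store helper above, summarized (this just
 * restates the code, useful when comparing against bs3CpuBasic2_sidt_sgdt_One):
 *
 *      offset 0:  uint16_t  limit
 *      offset 2:  base      - 64-bit code: full 64-bit base.
 *                           - 32-bit code: 32-bit base.
 *                           - 16-bit code: 32-bit base, with the top byte
 *                             forced to 0xFF on 286-class (and older) targets.
 */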
8068
8069
8070/**
8071 * Pushes a word onto the stack.
8072 *
8073 * @returns Strict VBox status code.
8074 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8075 * @param u16Value The value to push.
8076 */
8077VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
8078{
8079    /* Decrement the stack pointer. */
8080 uint64_t uNewRsp;
8081 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
8082
8083 /* Write the word the lazy way. */
8084 uint16_t *pu16Dst;
8085 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8086 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8087 if (rc == VINF_SUCCESS)
8088 {
8089 *pu16Dst = u16Value;
8090 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8091 }
8092
8093    /* Commit the new RSP value unless an access handler made trouble. */
8094 if (rc == VINF_SUCCESS)
8095 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8096
8097 return rc;
8098}
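
/*
 * The wider push helpers below follow the same pattern as the word push
 * above; a compact sketch of the sequence (purely descriptive, it mirrors the
 * code rather than adding to it):
 *
 *      1. iemRegGetRspForPush() computes the prospective stack top and the
 *         new RSP without touching the guest context.
 *      2. iemMemMap() maps SS:GCPtrTop for writing, doing all checks and any
 *         bounce buffering.
 *      3. The value is written and iemMemCommitAndUnmap() commits it.
 *      4. Only if every step succeeded is pVCpu->cpum.GstCtx.rsp updated.
 */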
8099
8100
8101/**
8102 * Pushes a dword onto the stack.
8103 *
8104 * @returns Strict VBox status code.
8105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8106 * @param u32Value The value to push.
8107 */
8108VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8109{
8110    /* Decrement the stack pointer. */
8111 uint64_t uNewRsp;
8112 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8113
8114 /* Write the dword the lazy way. */
8115 uint32_t *pu32Dst;
8116 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8117 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8118 if (rc == VINF_SUCCESS)
8119 {
8120 *pu32Dst = u32Value;
8121 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8122 }
8123
8124    /* Commit the new RSP value unless an access handler made trouble. */
8125 if (rc == VINF_SUCCESS)
8126 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8127
8128 return rc;
8129}
8130
8131
8132/**
8133 * Pushes a dword segment register value onto the stack.
8134 *
8135 * @returns Strict VBox status code.
8136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8137 * @param u32Value The value to push.
8138 */
8139VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8140{
8141    /* Decrement the stack pointer. */
8142 uint64_t uNewRsp;
8143 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8144
8145    /* The Intel docs talk about zero extending the selector register
8146 value. My actual intel CPU here might be zero extending the value
8147 but it still only writes the lower word... */
8148 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
8149 * happens when crossing an electric page boundary, is the high word checked
8150 * for write accessibility or not? Probably it is. What about segment limits?
8151 * It appears this behavior is also shared with trap error codes.
8152 *
8153 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
8154 * ancient hardware when it actually did change. */
8155 uint16_t *pu16Dst;
8156 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
8157 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
8158 if (rc == VINF_SUCCESS)
8159 {
8160 *pu16Dst = (uint16_t)u32Value;
8161 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
8162 }
8163
8164    /* Commit the new RSP value unless an access handler made trouble. */
8165 if (rc == VINF_SUCCESS)
8166 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8167
8168 return rc;
8169}
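
/*
 * Illustration of the quirk handled above, using hypothetical example values
 * (32-bit PUSH FS with ESP=0x1000): ESP is lowered by 4 to 0x0ffc and the
 * whole dword at SS:0x0ffc is access checked, but only the word at SS:0x0ffc
 * receives the selector value; the bytes at SS:0x0ffe..0x0fff keep whatever
 * they previously contained.
 */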
8170
8171
8172/**
8173 * Pushes a qword onto the stack.
8174 *
8175 * @returns Strict VBox status code.
8176 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8177 * @param u64Value The value to push.
8178 */
8179VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
8180{
8181    /* Decrement the stack pointer. */
8182 uint64_t uNewRsp;
8183 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
8184
8185    /* Write the qword the lazy way. */
8186 uint64_t *pu64Dst;
8187 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8188 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8189 if (rc == VINF_SUCCESS)
8190 {
8191 *pu64Dst = u64Value;
8192 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8193 }
8194
8195    /* Commit the new RSP value unless an access handler made trouble. */
8196 if (rc == VINF_SUCCESS)
8197 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8198
8199 return rc;
8200}
8201
8202
8203/**
8204 * Pops a word from the stack.
8205 *
8206 * @returns Strict VBox status code.
8207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8208 * @param pu16Value Where to store the popped value.
8209 */
8210VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
8211{
8212 /* Increment the stack pointer. */
8213 uint64_t uNewRsp;
8214 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
8215
8216    /* Read the word the lazy way. */
8217 uint16_t const *pu16Src;
8218 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8219 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8220 if (rc == VINF_SUCCESS)
8221 {
8222 *pu16Value = *pu16Src;
8223 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8224
8225 /* Commit the new RSP value. */
8226 if (rc == VINF_SUCCESS)
8227 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8228 }
8229
8230 return rc;
8231}
8232
8233
8234/**
8235 * Pops a dword from the stack.
8236 *
8237 * @returns Strict VBox status code.
8238 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8239 * @param pu32Value Where to store the popped value.
8240 */
8241VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
8242{
8243 /* Increment the stack pointer. */
8244 uint64_t uNewRsp;
8245 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
8246
8247    /* Read the dword the lazy way. */
8248 uint32_t const *pu32Src;
8249 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8250 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8251 if (rc == VINF_SUCCESS)
8252 {
8253 *pu32Value = *pu32Src;
8254 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8255
8256 /* Commit the new RSP value. */
8257 if (rc == VINF_SUCCESS)
8258 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8259 }
8260
8261 return rc;
8262}
8263
8264
8265/**
8266 * Pops a qword from the stack.
8267 *
8268 * @returns Strict VBox status code.
8269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8270 * @param pu64Value Where to store the popped value.
8271 */
8272VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
8273{
8274 /* Increment the stack pointer. */
8275 uint64_t uNewRsp;
8276 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
8277
8278    /* Read the qword the lazy way. */
8279 uint64_t const *pu64Src;
8280 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8281 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8282 if (rc == VINF_SUCCESS)
8283 {
8284 *pu64Value = *pu64Src;
8285 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8286
8287 /* Commit the new RSP value. */
8288 if (rc == VINF_SUCCESS)
8289 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8290 }
8291
8292 return rc;
8293}
8294
8295
8296/**
8297 * Pushes a word onto the stack, using a temporary stack pointer.
8298 *
8299 * @returns Strict VBox status code.
8300 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8301 * @param u16Value The value to push.
8302 * @param pTmpRsp Pointer to the temporary stack pointer.
8303 */
8304VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8305{
8306    /* Decrement the stack pointer. */
8307 RTUINT64U NewRsp = *pTmpRsp;
8308 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
8309
8310 /* Write the word the lazy way. */
8311 uint16_t *pu16Dst;
8312 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8313 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8314 if (rc == VINF_SUCCESS)
8315 {
8316 *pu16Dst = u16Value;
8317 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8318 }
8319
8320    /* Commit the new RSP value unless an access handler made trouble. */
8321 if (rc == VINF_SUCCESS)
8322 *pTmpRsp = NewRsp;
8323
8324 return rc;
8325}
8326
8327
8328/**
8329 * Pushes a dword onto the stack, using a temporary stack pointer.
8330 *
8331 * @returns Strict VBox status code.
8332 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8333 * @param u32Value The value to push.
8334 * @param pTmpRsp Pointer to the temporary stack pointer.
8335 */
8336VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8337{
8338    /* Decrement the stack pointer. */
8339 RTUINT64U NewRsp = *pTmpRsp;
8340 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
8341
8342    /* Write the dword the lazy way. */
8343 uint32_t *pu32Dst;
8344 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8345 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8346 if (rc == VINF_SUCCESS)
8347 {
8348 *pu32Dst = u32Value;
8349 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8350 }
8351
8352    /* Commit the new RSP value unless an access handler made trouble. */
8353 if (rc == VINF_SUCCESS)
8354 *pTmpRsp = NewRsp;
8355
8356 return rc;
8357}
8358
8359
8360/**
8361 * Pushes a qword onto the stack, using a temporary stack pointer.
8362 *
8363 * @returns Strict VBox status code.
8364 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8365 * @param u64Value The value to push.
8366 * @param pTmpRsp Pointer to the temporary stack pointer.
8367 */
8368VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8369{
8370    /* Decrement the stack pointer. */
8371 RTUINT64U NewRsp = *pTmpRsp;
8372 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
8373
8374    /* Write the qword the lazy way. */
8375 uint64_t *pu64Dst;
8376 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8377 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8378 if (rc == VINF_SUCCESS)
8379 {
8380 *pu64Dst = u64Value;
8381 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8382 }
8383
8384    /* Commit the new RSP value unless an access handler made trouble. */
8385 if (rc == VINF_SUCCESS)
8386 *pTmpRsp = NewRsp;
8387
8388 return rc;
8389}
8390
8391
8392/**
8393 * Pops a word from the stack, using a temporary stack pointer.
8394 *
8395 * @returns Strict VBox status code.
8396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8397 * @param pu16Value Where to store the popped value.
8398 * @param pTmpRsp Pointer to the temporary stack pointer.
8399 */
8400VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8401{
8402 /* Increment the stack pointer. */
8403 RTUINT64U NewRsp = *pTmpRsp;
8404 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
8405
8406    /* Read the word the lazy way. */
8407 uint16_t const *pu16Src;
8408 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8409 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8410 if (rc == VINF_SUCCESS)
8411 {
8412 *pu16Value = *pu16Src;
8413 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8414
8415 /* Commit the new RSP value. */
8416 if (rc == VINF_SUCCESS)
8417 *pTmpRsp = NewRsp;
8418 }
8419
8420 return rc;
8421}
8422
8423
8424/**
8425 * Pops a dword from the stack, using a temporary stack pointer.
8426 *
8427 * @returns Strict VBox status code.
8428 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8429 * @param pu32Value Where to store the popped value.
8430 * @param pTmpRsp Pointer to the temporary stack pointer.
8431 */
8432VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8433{
8434 /* Increment the stack pointer. */
8435 RTUINT64U NewRsp = *pTmpRsp;
8436 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
8437
8438    /* Read the dword the lazy way. */
8439 uint32_t const *pu32Src;
8440 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8441 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8442 if (rc == VINF_SUCCESS)
8443 {
8444 *pu32Value = *pu32Src;
8445 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8446
8447 /* Commit the new RSP value. */
8448 if (rc == VINF_SUCCESS)
8449 *pTmpRsp = NewRsp;
8450 }
8451
8452 return rc;
8453}
8454
8455
8456/**
8457 * Pops a qword from the stack, using a temporary stack pointer.
8458 *
8459 * @returns Strict VBox status code.
8460 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8461 * @param pu64Value Where to store the popped value.
8462 * @param pTmpRsp Pointer to the temporary stack pointer.
8463 */
8464VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8465{
8466 /* Increment the stack pointer. */
8467 RTUINT64U NewRsp = *pTmpRsp;
8468 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8469
8470    /* Read the qword the lazy way. */
8471 uint64_t const *pu64Src;
8472 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8473 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8474 if (rcStrict == VINF_SUCCESS)
8475 {
8476 *pu64Value = *pu64Src;
8477 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8478
8479 /* Commit the new RSP value. */
8480 if (rcStrict == VINF_SUCCESS)
8481 *pTmpRsp = NewRsp;
8482 }
8483
8484 return rcStrict;
8485}
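
/*
 * The *Ex push/pop variants above work on a caller owned RTUINT64U instead of
 * committing RSP directly, so multi-step operations can unwind cleanly.  A
 * minimal usage sketch (illustrative only, the variable names are made up):
 *
 *      RTUINT64U TmpRsp;
 *      uint16_t  uValue1, uValue2;
 *      TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
 *      rcStrict = iemMemStackPopU16Ex(pVCpu, &uValue1, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          rcStrict = iemMemStackPopU16Ex(pVCpu, &uValue2, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          pVCpu->cpum.GstCtx.rsp = TmpRsp.u;   <- commit only when all steps worked
 */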
8486
8487
8488/**
8489 * Begin a special stack push (used by interrupts, exceptions and such).
8490 *
8491 * This will raise \#SS or \#PF if appropriate.
8492 *
8493 * @returns Strict VBox status code.
8494 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8495 * @param cbMem The number of bytes to push onto the stack.
8496 * @param cbAlign The alignment mask (7, 3, 1).
8497 * @param ppvMem Where to return the pointer to the stack memory.
8498 * As with the other memory functions this could be
8499 * direct access or bounce buffered access, so
8500 * don't commit any registers until the commit call
8501 * succeeds.
8502 * @param puNewRsp Where to return the new RSP value. This must be
8503 * passed unchanged to
8504 * iemMemStackPushCommitSpecial().
8505 */
8506VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8507 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8508{
8509 Assert(cbMem < UINT8_MAX);
8510 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8511 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
8512 IEM_ACCESS_STACK_W, cbAlign);
8513}
8514
8515
8516/**
8517 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8518 *
8519 * This will update the rSP.
8520 *
8521 * @returns Strict VBox status code.
8522 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8523 * @param pvMem The pointer returned by
8524 * iemMemStackPushBeginSpecial().
8525 * @param uNewRsp The new RSP value returned by
8526 * iemMemStackPushBeginSpecial().
8527 */
8528VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8529{
8530 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8531 if (rcStrict == VINF_SUCCESS)
8532 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8533 return rcStrict;
8534}
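
/*
 * Typical pairing of the special push API (a sketch only; the real callers
 * are the interrupt/exception raisers, error handling is abbreviated and
 * cbFrame/cbAlign stand in for the caller's actual frame size and alignment
 * mask):
 *
 *      void     *pvStackFrame;
 *      uint64_t  uNewRsp;
 *      rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbFrame, cbAlign, &pvStackFrame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      ... fill in the frame via pvStackFrame ...
 *      rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvStackFrame, uNewRsp);
 */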
8535
8536
8537/**
8538 * Begin a special stack pop (used by iret, retf and such).
8539 *
8540 * This will raise \#SS or \#PF if appropriate.
8541 *
8542 * @returns Strict VBox status code.
8543 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8544 * @param cbMem The number of bytes to pop from the stack.
8545 * @param cbAlign The alignment mask (7, 3, 1).
8546 * @param ppvMem Where to return the pointer to the stack memory.
8547 * @param puNewRsp Where to return the new RSP value. This must be
8548 * assigned to CPUMCTX::rsp manually some time
8549 * after iemMemStackPopDoneSpecial() has been
8550 * called.
8551 */
8552VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8553 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8554{
8555 Assert(cbMem < UINT8_MAX);
8556 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8557 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8558}
8559
8560
8561/**
8562 * Continue a special stack pop (used by iret and retf), for the purpose of
8563 * retrieving a new stack pointer.
8564 *
8565 * This will raise \#SS or \#PF if appropriate.
8566 *
8567 * @returns Strict VBox status code.
8568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8569 * @param off Offset from the top of the stack. This is zero
8570 * except in the retf case.
8571 * @param cbMem The number of bytes to pop from the stack.
8572 * @param ppvMem Where to return the pointer to the stack memory.
8573 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8574 * return this because all use of this function is
8575 * to retrieve a new value and anything we return
8576 * here would be discarded.)
8577 */
8578VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8579 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
8580{
8581 Assert(cbMem < UINT8_MAX);
8582
8583    /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8584 RTGCPTR GCPtrTop;
8585 if (IEM_IS_64BIT_CODE(pVCpu))
8586 GCPtrTop = uCurNewRsp;
8587 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8588 GCPtrTop = (uint32_t)uCurNewRsp;
8589 else
8590 GCPtrTop = (uint16_t)uCurNewRsp;
8591
8592 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8593 0 /* checked in iemMemStackPopBeginSpecial */);
8594}
8595
8596
8597/**
8598 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8599 * iemMemStackPopContinueSpecial).
8600 *
8601 * The caller will manually commit the rSP.
8602 *
8603 * @returns Strict VBox status code.
8604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8605 * @param pvMem The pointer returned by
8606 * iemMemStackPopBeginSpecial() or
8607 * iemMemStackPopContinueSpecial().
8608 */
8609VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8610{
8611 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8612}
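
/*
 * The matching pop side, as used by iret/retf style code (again a sketch with
 * illustrative names; note that RSP is only assigned by the caller once the
 * whole operation is certain to succeed):
 *
 *      void const *pvFrame;
 *      uint64_t    uNewRsp;
 *      rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbFrame, cbAlign, &pvFrame, &uNewRsp);
 *      ... read and validate the frame ...
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvFrame);
 *      ...
 *      pVCpu->cpum.GstCtx.rsp = uNewRsp;
 */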
8613
8614
8615/**
8616 * Fetches a system table byte.
8617 *
8618 * @returns Strict VBox status code.
8619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8620 * @param pbDst Where to return the byte.
8621 * @param iSegReg The index of the segment register to use for
8622 * this access. The base and limits are checked.
8623 * @param GCPtrMem The address of the guest memory.
8624 */
8625VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8626{
8627 /* The lazy approach for now... */
8628 uint8_t const *pbSrc;
8629 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8630 if (rc == VINF_SUCCESS)
8631 {
8632 *pbDst = *pbSrc;
8633 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8634 }
8635 return rc;
8636}
8637
8638
8639/**
8640 * Fetches a system table word.
8641 *
8642 * @returns Strict VBox status code.
8643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8644 * @param pu16Dst Where to return the word.
8645 * @param iSegReg The index of the segment register to use for
8646 * this access. The base and limits are checked.
8647 * @param GCPtrMem The address of the guest memory.
8648 */
8649VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8650{
8651 /* The lazy approach for now... */
8652 uint16_t const *pu16Src;
8653 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8654 if (rc == VINF_SUCCESS)
8655 {
8656 *pu16Dst = *pu16Src;
8657 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8658 }
8659 return rc;
8660}
8661
8662
8663/**
8664 * Fetches a system table dword.
8665 *
8666 * @returns Strict VBox status code.
8667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8668 * @param pu32Dst Where to return the dword.
8669 * @param iSegReg The index of the segment register to use for
8670 * this access. The base and limits are checked.
8671 * @param GCPtrMem The address of the guest memory.
8672 */
8673VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8674{
8675 /* The lazy approach for now... */
8676 uint32_t const *pu32Src;
8677 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8678 if (rc == VINF_SUCCESS)
8679 {
8680 *pu32Dst = *pu32Src;
8681 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8682 }
8683 return rc;
8684}
8685
8686
8687/**
8688 * Fetches a system table qword.
8689 *
8690 * @returns Strict VBox status code.
8691 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8692 * @param pu64Dst Where to return the qword.
8693 * @param iSegReg The index of the segment register to use for
8694 * this access. The base and limits are checked.
8695 * @param GCPtrMem The address of the guest memory.
8696 */
8697VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8698{
8699 /* The lazy approach for now... */
8700 uint64_t const *pu64Src;
8701 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8702 if (rc == VINF_SUCCESS)
8703 {
8704 *pu64Dst = *pu64Src;
8705 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8706 }
8707 return rc;
8708}
8709
8710
8711/**
8712 * Fetches a descriptor table entry with caller specified error code.
8713 *
8714 * @returns Strict VBox status code.
8715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8716 * @param pDesc Where to return the descriptor table entry.
8717 * @param uSel The selector which table entry to fetch.
8718 * @param uXcpt The exception to raise on table lookup error.
8719 * @param uErrorCode The error code associated with the exception.
8720 */
8721static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8722 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8723{
8724 AssertPtr(pDesc);
8725 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8726
8727 /** @todo did the 286 require all 8 bytes to be accessible? */
8728 /*
8729 * Get the selector table base and check bounds.
8730 */
8731 RTGCPTR GCPtrBase;
8732 if (uSel & X86_SEL_LDT)
8733 {
8734 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8735 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8736 {
8737 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8738 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8739 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8740 uErrorCode, 0);
8741 }
8742
8743 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8744 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8745 }
8746 else
8747 {
8748 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8749 {
8750 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8751 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8752 uErrorCode, 0);
8753 }
8754 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8755 }
8756
8757 /*
8758 * Read the legacy descriptor and maybe the long mode extensions if
8759 * required.
8760 */
8761 VBOXSTRICTRC rcStrict;
8762 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8763 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8764 else
8765 {
8766 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8767 if (rcStrict == VINF_SUCCESS)
8768 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8769 if (rcStrict == VINF_SUCCESS)
8770 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8771 if (rcStrict == VINF_SUCCESS)
8772 pDesc->Legacy.au16[3] = 0;
8773 else
8774 return rcStrict;
8775 }
8776
8777 if (rcStrict == VINF_SUCCESS)
8778 {
8779 if ( !IEM_IS_LONG_MODE(pVCpu)
8780 || pDesc->Legacy.Gen.u1DescType)
8781 pDesc->Long.au64[1] = 0;
8782 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8783 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8784 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8785 else
8786 {
8787 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8788 /** @todo is this the right exception? */
8789 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8790 }
8791 }
8792 return rcStrict;
8793}
8794
8795
8796/**
8797 * Fetches a descriptor table entry.
8798 *
8799 * @returns Strict VBox status code.
8800 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8801 * @param pDesc Where to return the descriptor table entry.
8802 * @param uSel The selector which table entry to fetch.
8803 * @param uXcpt The exception to raise on table lookup error.
8804 */
8805VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8806{
8807 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8808}
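
/*
 * Worked example for the table lookup above (standard selector layout, the
 * value is picked arbitrarily): uSel = 0x002b gives
 *
 *      RPL         = uSel & X86_SEL_RPL  = 3
 *      table       = uSel & X86_SEL_LDT  = 0     -> GDT
 *      byte offset = uSel & X86_SEL_MASK = 0x28
 *
 * so the 8-byte legacy descriptor is read from gdtr.pGdt + 0x28, and in long
 * mode a system descriptor additionally pulls in the following 8 bytes.
 */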
8809
8810
8811/**
8812 * Marks the selector descriptor as accessed (only non-system descriptors).
8813 *
8814 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8815 * will therefore skip the limit checks.
8816 *
8817 * @returns Strict VBox status code.
8818 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8819 * @param uSel The selector.
8820 */
8821VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8822{
8823 /*
8824 * Get the selector table base and calculate the entry address.
8825 */
8826 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8827 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8828 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8829 GCPtr += uSel & X86_SEL_MASK;
8830
8831 /*
8832 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8833 * ugly stuff to avoid this. This will make sure it's an atomic access
8834 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8835 */
8836 VBOXSTRICTRC rcStrict;
8837 uint32_t volatile *pu32;
8838 if ((GCPtr & 3) == 0)
8839 {
8840        /* The normal case, map the 32 bits around the accessed bit (bit 40). */
8841 GCPtr += 2 + 2;
8842 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8843 if (rcStrict != VINF_SUCCESS)
8844 return rcStrict;
8845        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8846 }
8847 else
8848 {
8849 /* The misaligned GDT/LDT case, map the whole thing. */
8850 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8851 if (rcStrict != VINF_SUCCESS)
8852 return rcStrict;
8853 switch ((uintptr_t)pu32 & 3)
8854 {
8855 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8856 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8857 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8858 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8859 }
8860 }
8861
8862 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8863}
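
/*
 * A note on the bit arithmetic above: the accessed bit is bit 0 of the type
 * field in descriptor byte 5, i.e. absolute bit 40 of the 8-byte entry.  The
 * aligned path maps bytes 4..7 (GCPtr + 4) and therefore sets bit 8 of that
 * dword (40 - 32 = 8); the misaligned path maps all 8 bytes and picks a
 * pointer/bit-offset pair that lands on the same bit without tripping the
 * alignment assertion in ASMAtomicBitSet.
 */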
8864
8865/** @} */
8866
8867/** @name Opcode Helpers.
8868 * @{
8869 */
8870
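/*
 * A compact ModR/M refresher for the decoder below (standard encoding, the
 * example value is arbitrary): with 16-bit addressing, bRm = 0x42 decodes as
 *
 *      mod = 01  -> a disp8 follows
 *      reg = 000 -> operand register, not part of the address
 *      rm  = 010 -> BP + SI, which defaults to the SS segment
 *
 * giving an effective address of BP + SI + disp8 with SS as the default
 * segment (the rm case 2 branch with SET_SS_DEF() in the code below).
 */
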
8871/**
8872 * Calculates the effective address of a ModR/M memory operand.
8873 *
8874 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8875 *
8876 * @return Strict VBox status code.
8877 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8878 * @param bRm The ModRM byte.
8879 * @param cbImmAndRspOffset - First byte: The size of any immediate
8880 * following the effective address opcode bytes
8881 * (only for RIP relative addressing).
8882 * - Second byte: RSP displacement (for POP [ESP]).
8883 * @param pGCPtrEff Where to return the effective address.
8884 */
8885VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8886{
8887 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8888# define SET_SS_DEF() \
8889 do \
8890 { \
8891 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8892 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8893 } while (0)
8894
8895 if (!IEM_IS_64BIT_CODE(pVCpu))
8896 {
8897/** @todo Check the effective address size crap! */
8898 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8899 {
8900 uint16_t u16EffAddr;
8901
8902 /* Handle the disp16 form with no registers first. */
8903 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8904 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8905 else
8906 {
8907                /* Get the displacement. */
8908 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8909 {
8910 case 0: u16EffAddr = 0; break;
8911 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8912 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8913 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8914 }
8915
8916 /* Add the base and index registers to the disp. */
8917 switch (bRm & X86_MODRM_RM_MASK)
8918 {
8919 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8920 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8921 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8922 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8923 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8924 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8925 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8926 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8927 }
8928 }
8929
8930 *pGCPtrEff = u16EffAddr;
8931 }
8932 else
8933 {
8934 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8935 uint32_t u32EffAddr;
8936
8937 /* Handle the disp32 form with no registers first. */
8938 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8939 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8940 else
8941 {
8942 /* Get the register (or SIB) value. */
8943 switch ((bRm & X86_MODRM_RM_MASK))
8944 {
8945 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8946 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8947 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8948 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8949 case 4: /* SIB */
8950 {
8951 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8952
8953 /* Get the index and scale it. */
8954 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8955 {
8956 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8957 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8958 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8959 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8960 case 4: u32EffAddr = 0; /*none */ break;
8961 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8962 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8963 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8964 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8965 }
8966 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8967
8968 /* add base */
8969 switch (bSib & X86_SIB_BASE_MASK)
8970 {
8971 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8972 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8973 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8974 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8975 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8976 case 5:
8977 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8978 {
8979 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8980 SET_SS_DEF();
8981 }
8982 else
8983 {
8984 uint32_t u32Disp;
8985 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8986 u32EffAddr += u32Disp;
8987 }
8988 break;
8989 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8990 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8991 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8992 }
8993 break;
8994 }
8995 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8996 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8997 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8998 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8999 }
9000
9001 /* Get and add the displacement. */
9002 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9003 {
9004 case 0:
9005 break;
9006 case 1:
9007 {
9008 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9009 u32EffAddr += i8Disp;
9010 break;
9011 }
9012 case 2:
9013 {
9014 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9015 u32EffAddr += u32Disp;
9016 break;
9017 }
9018 default:
9019 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9020 }
9021
9022 }
9023 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9024 *pGCPtrEff = u32EffAddr;
9025 else
9026 {
9027 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9028 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9029 }
9030 }
9031 }
9032 else
9033 {
9034 uint64_t u64EffAddr;
9035
9036 /* Handle the rip+disp32 form with no registers first. */
9037 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9038 {
9039 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9040 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9041 }
9042 else
9043 {
9044 /* Get the register (or SIB) value. */
9045 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9046 {
9047 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9048 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9049 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9050 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9051 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9052 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9053 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9054 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9055 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9056 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9057 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9058 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9059 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9060 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9061 /* SIB */
9062 case 4:
9063 case 12:
9064 {
9065 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9066
9067 /* Get the index and scale it. */
9068 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9069 {
9070 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9071 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9072 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9073 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9074 case 4: u64EffAddr = 0; /*none */ break;
9075 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9076 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9077 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9078 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9079 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9080 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9081 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9082 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9083 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9084 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9085 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9086 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9087 }
9088 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9089
9090 /* add base */
9091 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9092 {
9093 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9094 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9095 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9096 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9097 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9098 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9099 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9100 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9101 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9102 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9103 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9104 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9105 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9106 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9107 /* complicated encodings */
9108 case 5:
9109 case 13:
9110 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9111 {
9112 if (!pVCpu->iem.s.uRexB)
9113 {
9114 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9115 SET_SS_DEF();
9116 }
9117 else
9118 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9119 }
9120 else
9121 {
9122 uint32_t u32Disp;
9123 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9124 u64EffAddr += (int32_t)u32Disp;
9125 }
9126 break;
9127 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9128 }
9129 break;
9130 }
9131 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9132 }
9133
9134 /* Get and add the displacement. */
9135 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9136 {
9137 case 0:
9138 break;
9139 case 1:
9140 {
9141 int8_t i8Disp;
9142 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9143 u64EffAddr += i8Disp;
9144 break;
9145 }
9146 case 2:
9147 {
9148 uint32_t u32Disp;
9149 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9150 u64EffAddr += (int32_t)u32Disp;
9151 break;
9152 }
9153 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9154 }
9155
9156 }
9157
9158 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9159 *pGCPtrEff = u64EffAddr;
9160 else
9161 {
9162 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9163 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9164 }
9165 }
9166
9167 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9168 return VINF_SUCCESS;
9169}
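/*
 * Illustrative standalone sketch (plain C, no VBox types) of the 16-bit ModR/M table used
 * above: EffAddr = base + index + disp, truncated to 16 bits.  The mod==0 && rm==6
 * "disp16 only" special case is assumed to have been handled already, exactly as in
 * iemOpHlpCalcRmEffAddr; register values are passed in explicitly.
 */
#include <assert.h>
#include <stdint.h>

typedef struct EA16REGS { uint16_t bx, bp, si, di; } EA16REGS;

static uint16_t calcEffAddr16Sketch(uint8_t bRm, uint16_t u16Disp, EA16REGS const *pRegs)
{
    uint16_t uEffAddr = u16Disp;
    switch (bRm & 7)  /* X86_MODRM_RM_MASK */
    {
        case 0: uEffAddr += pRegs->bx + pRegs->si; break;
        case 1: uEffAddr += pRegs->bx + pRegs->di; break;
        case 2: uEffAddr += pRegs->bp + pRegs->si; break;  /* SS is the default segment */
        case 3: uEffAddr += pRegs->bp + pRegs->di; break;  /* ditto */
        case 4: uEffAddr += pRegs->si; break;
        case 5: uEffAddr += pRegs->di; break;
        case 6: uEffAddr += pRegs->bp; break;               /* ditto */
        case 7: uEffAddr += pRegs->bx; break;
    }
    return uEffAddr;  /* 16-bit wrap-around is implied by the return type */
}

int main(void)
{
    EA16REGS const Regs = { /*bx*/ 0x1000, /*bp*/ 0x2000, /*si*/ 0x0010, /*di*/ 0x0020 };
    assert(calcEffAddr16Sketch(0 /* [bx+si] */, 0x0000, &Regs) == 0x1010);
    assert(calcEffAddr16Sketch(6 /* [bp+disp] */, 0x0008, &Regs) == 0x2008);
    return 0;
}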
9170
9171
9172#ifdef IEM_WITH_SETJMP
9173/**
9174 * Calculates the effective address of a ModR/M memory operand.
9175 *
9176 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9177 *
9178 * May longjmp on internal error.
9179 *
9180 * @return The effective address.
9181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9182 * @param bRm The ModRM byte.
9183 * @param cbImmAndRspOffset - First byte: The size of any immediate
9184 * following the effective address opcode bytes
9185 * (only for RIP relative addressing).
9186 * - Second byte: RSP displacement (for POP [ESP]).
9187 */
9188RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
9189{
9190 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9191# define SET_SS_DEF() \
9192 do \
9193 { \
9194 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9195 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9196 } while (0)
9197
9198 if (!IEM_IS_64BIT_CODE(pVCpu))
9199 {
9200/** @todo Check the effective address size crap! */
9201 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9202 {
9203 uint16_t u16EffAddr;
9204
9205 /* Handle the disp16 form with no registers first. */
9206 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9207 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9208 else
9209 {
9210                 /* Get the displacement. */
9211 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9212 {
9213 case 0: u16EffAddr = 0; break;
9214 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9215 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9216 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
9217 }
9218
9219 /* Add the base and index registers to the disp. */
9220 switch (bRm & X86_MODRM_RM_MASK)
9221 {
9222 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9223 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9224 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9225 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9226 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9227 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9228 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9229 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9230 }
9231 }
9232
9233 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9234 return u16EffAddr;
9235 }
9236
9237 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9238 uint32_t u32EffAddr;
9239
9240 /* Handle the disp32 form with no registers first. */
9241 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9242 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9243 else
9244 {
9245 /* Get the register (or SIB) value. */
9246 switch ((bRm & X86_MODRM_RM_MASK))
9247 {
9248 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9249 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9250 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9251 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9252 case 4: /* SIB */
9253 {
9254 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9255
9256 /* Get the index and scale it. */
9257 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9258 {
9259 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9260 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9261 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9262 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9263 case 4: u32EffAddr = 0; /*none */ break;
9264 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9265 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9266 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9267 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9268 }
9269 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9270
9271 /* add base */
9272 switch (bSib & X86_SIB_BASE_MASK)
9273 {
9274 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9275 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9276 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9277 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9278 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9279 case 5:
9280 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9281 {
9282 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9283 SET_SS_DEF();
9284 }
9285 else
9286 {
9287 uint32_t u32Disp;
9288 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9289 u32EffAddr += u32Disp;
9290 }
9291 break;
9292 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9293 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9294 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9295 }
9296 break;
9297 }
9298 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9299 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9300 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9301 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9302 }
9303
9304 /* Get and add the displacement. */
9305 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9306 {
9307 case 0:
9308 break;
9309 case 1:
9310 {
9311 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9312 u32EffAddr += i8Disp;
9313 break;
9314 }
9315 case 2:
9316 {
9317 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9318 u32EffAddr += u32Disp;
9319 break;
9320 }
9321 default:
9322 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
9323 }
9324 }
9325
9326 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9327 {
9328 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9329 return u32EffAddr;
9330 }
9331 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9332 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9333 return u32EffAddr & UINT16_MAX;
9334 }
9335
9336 uint64_t u64EffAddr;
9337
9338 /* Handle the rip+disp32 form with no registers first. */
9339 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9340 {
9341 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9342 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9343 }
9344 else
9345 {
9346 /* Get the register (or SIB) value. */
9347 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9348 {
9349 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9350 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9351 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9352 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9353 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9354 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9355 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9356 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9357 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9358 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9359 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9360 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9361 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9362 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9363 /* SIB */
9364 case 4:
9365 case 12:
9366 {
9367 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9368
9369 /* Get the index and scale it. */
9370 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9371 {
9372 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9373 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9374 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9375 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9376 case 4: u64EffAddr = 0; /*none */ break;
9377 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9378 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9379 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9380 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9381 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9382 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9383 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9384 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9385 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9386 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9387 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9388 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9389 }
9390 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9391
9392 /* add base */
9393 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9394 {
9395 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9396 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9397 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9398 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9399 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9400 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9401 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9402 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9403 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9404 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9405 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9406 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9407 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9408 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9409 /* complicated encodings */
9410 case 5:
9411 case 13:
9412 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9413 {
9414 if (!pVCpu->iem.s.uRexB)
9415 {
9416 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9417 SET_SS_DEF();
9418 }
9419 else
9420 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9421 }
9422 else
9423 {
9424 uint32_t u32Disp;
9425 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9426 u64EffAddr += (int32_t)u32Disp;
9427 }
9428 break;
9429 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9430 }
9431 break;
9432 }
9433 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9434 }
9435
9436 /* Get and add the displacement. */
9437 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9438 {
9439 case 0:
9440 break;
9441 case 1:
9442 {
9443 int8_t i8Disp;
9444 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9445 u64EffAddr += i8Disp;
9446 break;
9447 }
9448 case 2:
9449 {
9450 uint32_t u32Disp;
9451 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9452 u64EffAddr += (int32_t)u32Disp;
9453 break;
9454 }
9455 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9456 }
9457
9458 }
9459
9460 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9461 {
9462 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9463 return u64EffAddr;
9464 }
9465 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9466 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9467 return u64EffAddr & UINT32_MAX;
9468}
9469#endif /* IEM_WITH_SETJMP */
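/*
 * Illustrative standalone sketch (plain C) of the 64-bit RIP-relative case handled above:
 * the disp32 is relative to the address of the *next* instruction, so the length decoded so
 * far plus the size of any trailing immediate (the low byte of cbImmAndRspOffset) is added
 * on top of RIP.  Parameter names here are hypothetical.
 */
#include <stdint.h>

static uint64_t calcRipRelativeSketch(uint64_t uRip,       /* RIP of the current instruction   */
                                      uint8_t  cbDecoded,  /* bytes decoded incl. the disp32   */
                                      uint8_t  cbImm,      /* immediate still to be fetched    */
                                      int32_t  i32Disp)    /* sign-extended 32-bit displacement */
{
    return uRip + cbDecoded + cbImm + (uint64_t)(int64_t)i32Disp;
}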
9470
9471
9472/**
9473 * Calculates the effective address of a ModR/M memory operand, extended version
9474 * for use in the recompilers.
9475 *
9476 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9477 *
9478 * @return Strict VBox status code.
9479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9480 * @param bRm The ModRM byte.
9481 * @param cbImmAndRspOffset - First byte: The size of any immediate
9482 * following the effective address opcode bytes
9483 * (only for RIP relative addressing).
9484 * - Second byte: RSP displacement (for POP [ESP]).
9485 * @param pGCPtrEff Where to return the effective address.
9486 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
9487 * SIB byte (bits 39:32).
9488 */
9489VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
9490{
9491 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9492# define SET_SS_DEF() \
9493 do \
9494 { \
9495 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9496 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9497 } while (0)
9498
9499 uint64_t uInfo;
9500 if (!IEM_IS_64BIT_CODE(pVCpu))
9501 {
9502/** @todo Check the effective address size crap! */
9503 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9504 {
9505 uint16_t u16EffAddr;
9506
9507 /* Handle the disp16 form with no registers first. */
9508 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9509 {
9510 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9511 uInfo = u16EffAddr;
9512 }
9513 else
9514 {
9515                 /* Get the displacement. */
9516 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9517 {
9518 case 0: u16EffAddr = 0; break;
9519 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9520 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9521 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9522 }
9523 uInfo = u16EffAddr;
9524
9525 /* Add the base and index registers to the disp. */
9526 switch (bRm & X86_MODRM_RM_MASK)
9527 {
9528 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9529 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9530 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9531 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9532 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9533 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9534 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9535 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9536 }
9537 }
9538
9539 *pGCPtrEff = u16EffAddr;
9540 }
9541 else
9542 {
9543 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9544 uint32_t u32EffAddr;
9545
9546 /* Handle the disp32 form with no registers first. */
9547 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9548 {
9549 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9550 uInfo = u32EffAddr;
9551 }
9552 else
9553 {
9554 /* Get the register (or SIB) value. */
9555 uInfo = 0;
9556 switch ((bRm & X86_MODRM_RM_MASK))
9557 {
9558 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9559 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9560 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9561 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9562 case 4: /* SIB */
9563 {
9564 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9565 uInfo = (uint64_t)bSib << 32;
9566
9567 /* Get the index and scale it. */
9568 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9569 {
9570 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9571 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9572 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9573 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9574 case 4: u32EffAddr = 0; /*none */ break;
9575 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9576 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9577 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9578 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9579 }
9580 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9581
9582 /* add base */
9583 switch (bSib & X86_SIB_BASE_MASK)
9584 {
9585 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9586 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9587 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9588 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9589 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9590 case 5:
9591 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9592 {
9593 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9594 SET_SS_DEF();
9595 }
9596 else
9597 {
9598 uint32_t u32Disp;
9599 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9600 u32EffAddr += u32Disp;
9601 uInfo |= u32Disp;
9602 }
9603 break;
9604 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9605 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9606 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9607 }
9608 break;
9609 }
9610 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9611 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9612 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9613 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9614 }
9615
9616 /* Get and add the displacement. */
9617 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9618 {
9619 case 0:
9620 break;
9621 case 1:
9622 {
9623 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9624 u32EffAddr += i8Disp;
9625 uInfo |= (uint32_t)(int32_t)i8Disp;
9626 break;
9627 }
9628 case 2:
9629 {
9630 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9631 u32EffAddr += u32Disp;
9632 uInfo |= (uint32_t)u32Disp;
9633 break;
9634 }
9635 default:
9636 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9637 }
9638
9639 }
9640 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9641 *pGCPtrEff = u32EffAddr;
9642 else
9643 {
9644 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9645 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9646 }
9647 }
9648 }
9649 else
9650 {
9651 uint64_t u64EffAddr;
9652
9653 /* Handle the rip+disp32 form with no registers first. */
9654 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9655 {
9656 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9657 uInfo = (uint32_t)u64EffAddr;
9658 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9659 }
9660 else
9661 {
9662 /* Get the register (or SIB) value. */
9663 uInfo = 0;
9664 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9665 {
9666 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9667 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9668 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9669 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9670 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9671 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9672 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9673 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9674 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9675 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9676 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9677 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9678 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9679 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9680 /* SIB */
9681 case 4:
9682 case 12:
9683 {
9684 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9685 uInfo = (uint64_t)bSib << 32;
9686
9687 /* Get the index and scale it. */
9688 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9689 {
9690 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9691 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9692 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9693 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9694 case 4: u64EffAddr = 0; /*none */ break;
9695 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9696 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9697 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9698 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9699 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9700 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9701 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9702 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9703 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9704 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9705 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9706 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9707 }
9708 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9709
9710 /* add base */
9711 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9712 {
9713 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9714 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9715 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9716 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9717 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9718 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9719 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9720 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9721 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9722 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9723 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9724 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9725 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9726 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9727 /* complicated encodings */
9728 case 5:
9729 case 13:
9730 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9731 {
9732 if (!pVCpu->iem.s.uRexB)
9733 {
9734 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9735 SET_SS_DEF();
9736 }
9737 else
9738 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9739 }
9740 else
9741 {
9742 uint32_t u32Disp;
9743 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9744 u64EffAddr += (int32_t)u32Disp;
9745 uInfo |= u32Disp;
9746 }
9747 break;
9748 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9749 }
9750 break;
9751 }
9752 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9753 }
9754
9755 /* Get and add the displacement. */
9756 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9757 {
9758 case 0:
9759 break;
9760 case 1:
9761 {
9762 int8_t i8Disp;
9763 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9764 u64EffAddr += i8Disp;
9765 uInfo |= (uint32_t)(int32_t)i8Disp;
9766 break;
9767 }
9768 case 2:
9769 {
9770 uint32_t u32Disp;
9771 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9772 u64EffAddr += (int32_t)u32Disp;
9773 uInfo |= u32Disp;
9774 break;
9775 }
9776 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9777 }
9778
9779 }
9780
9781 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9782 *pGCPtrEff = u64EffAddr;
9783 else
9784 {
9785 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9786 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9787 }
9788 }
9789 *puInfo = uInfo;
9790
9791 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9792 return VINF_SUCCESS;
9793}
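/*
 * Illustrative standalone sketch (plain C) of the uInfo layout produced by
 * iemOpHlpCalcRmEffAddrEx above: the 32-bit displacement sits in bits 31:0 and the SIB
 * byte in bits 39:32.  The helper names are hypothetical.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t packEaInfoSketch(uint32_t u32Disp, uint8_t bSib)
{
    return ((uint64_t)bSib << 32) | u32Disp;
}

int main(void)
{
    uint64_t const uInfo = packEaInfoSketch(UINT32_C(0x80000010), 0x24 /* [esp]-style SIB */);
    assert((uint32_t)uInfo        == UINT32_C(0x80000010));  /* displacement, bits 31:0 */
    assert((uint8_t)(uInfo >> 32) == 0x24);                  /* SIB byte, bits 39:32    */
    return 0;
}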
9794
9795/** @} */
9796
9797
9798#ifdef LOG_ENABLED
9799/**
9800 * Logs the current instruction.
9801 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9802 * @param fSameCtx Set if we have the same context information as the VMM,
9803 * clear if we may have already executed an instruction in
9804 * our debug context. When clear, we assume IEMCPU holds
9805 * valid CPU mode info.
9806 *
9807  * The @a fSameCtx parameter is now misleading and obsolete.
9808 * @param pszFunction The IEM function doing the execution.
9809 */
9810static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9811{
9812# ifdef IN_RING3
9813 if (LogIs2Enabled())
9814 {
9815 char szInstr[256];
9816 uint32_t cbInstr = 0;
9817 if (fSameCtx)
9818 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9819 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9820 szInstr, sizeof(szInstr), &cbInstr);
9821 else
9822 {
9823 uint32_t fFlags = 0;
9824 switch (IEM_GET_CPU_MODE(pVCpu))
9825 {
9826 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9827 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9828 case IEMMODE_16BIT:
9829 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9830 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9831 else
9832 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9833 break;
9834 }
9835 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9836 szInstr, sizeof(szInstr), &cbInstr);
9837 }
9838
9839 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9840 Log2(("**** %s fExec=%x\n"
9841 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9842 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9843 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9844 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9845 " %s\n"
9846 , pszFunction, pVCpu->iem.s.fExec,
9847 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9848 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9849 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9850 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9851 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9852 szInstr));
9853
9854 if (LogIs3Enabled())
9855 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9856 }
9857 else
9858# endif
9859 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9860 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9861 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9862}
9863#endif /* LOG_ENABLED */
9864
9865
9866#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9867/**
9868 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9869 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9870 *
9871 * @returns Modified rcStrict.
9872 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9873 * @param rcStrict The instruction execution status.
9874 */
9875static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9876{
9877 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9878 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9879 {
9880 /* VMX preemption timer takes priority over NMI-window exits. */
9881 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9882 {
9883 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9884 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9885 }
9886 /*
9887 * Check remaining intercepts.
9888 *
9889 * NMI-window and Interrupt-window VM-exits.
9890 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9891 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9892 *
9893 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9894 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9895 */
9896 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9897 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9898 && !TRPMHasTrap(pVCpu))
9899 {
9900 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9901 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9902 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9903 {
9904 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9905 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9906 }
9907 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9908 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9909 {
9910 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9911 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9912 }
9913 }
9914 }
9915 /* TPR-below threshold/APIC write has the highest priority. */
9916 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9917 {
9918 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9919 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9920 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9921 }
9922 /* MTF takes priority over VMX-preemption timer. */
9923 else
9924 {
9925 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9926 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9927 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9928 }
9929 return rcStrict;
9930}
9931#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9932
9933
9934/**
9935 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9936 * IEMExecOneWithPrefetchedByPC.
9937 *
9938 * Similar code is found in IEMExecLots.
9939 *
9940 * @return Strict VBox status code.
9941 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9942 * @param fExecuteInhibit If set, execute the instruction following CLI,
9943 * POP SS and MOV SS,GR.
9944 * @param pszFunction The calling function name.
9945 */
9946DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9947{
9948 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9949 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9950 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9951 RT_NOREF_PV(pszFunction);
9952
9953#ifdef IEM_WITH_SETJMP
9954 VBOXSTRICTRC rcStrict;
9955 IEM_TRY_SETJMP(pVCpu, rcStrict)
9956 {
9957 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9958 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9959 }
9960 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9961 {
9962 pVCpu->iem.s.cLongJumps++;
9963 }
9964 IEM_CATCH_LONGJMP_END(pVCpu);
9965#else
9966 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9967 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9968#endif
9969 if (rcStrict == VINF_SUCCESS)
9970 pVCpu->iem.s.cInstructions++;
9971 if (pVCpu->iem.s.cActiveMappings > 0)
9972 {
9973 Assert(rcStrict != VINF_SUCCESS);
9974 iemMemRollback(pVCpu);
9975 }
9976 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9977 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9978 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9979
9980//#ifdef DEBUG
9981// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9982//#endif
9983
9984#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9985 /*
9986 * Perform any VMX nested-guest instruction boundary actions.
9987 *
9988 * If any of these causes a VM-exit, we must skip executing the next
9989 * instruction (would run into stale page tables). A VM-exit makes sure
 9990      * there is no interrupt-inhibition, so that should ensure we don't go on
 9991      * to try to execute the next instruction. Clearing fExecuteInhibit is
9992 * problematic because of the setjmp/longjmp clobbering above.
9993 */
9994 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9995 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9996 || rcStrict != VINF_SUCCESS)
9997 { /* likely */ }
9998 else
9999 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10000#endif
10001
10002 /* Execute the next instruction as well if a cli, pop ss or
10003 mov ss, Gr has just completed successfully. */
10004 if ( fExecuteInhibit
10005 && rcStrict == VINF_SUCCESS
10006 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
10007 {
10008 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
10009 if (rcStrict == VINF_SUCCESS)
10010 {
10011#ifdef LOG_ENABLED
10012 iemLogCurInstr(pVCpu, false, pszFunction);
10013#endif
10014#ifdef IEM_WITH_SETJMP
10015 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
10016 {
10017 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10018 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10019 }
10020 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10021 {
10022 pVCpu->iem.s.cLongJumps++;
10023 }
10024 IEM_CATCH_LONGJMP_END(pVCpu);
10025#else
10026 IEM_OPCODE_GET_FIRST_U8(&b);
10027 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10028#endif
10029 if (rcStrict == VINF_SUCCESS)
10030 {
10031 pVCpu->iem.s.cInstructions++;
10032#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10033 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10034 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
10035 { /* likely */ }
10036 else
10037 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10038#endif
10039 }
10040 if (pVCpu->iem.s.cActiveMappings > 0)
10041 {
10042 Assert(rcStrict != VINF_SUCCESS);
10043 iemMemRollback(pVCpu);
10044 }
10045 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
10046 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
10047 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
10048 }
10049 else if (pVCpu->iem.s.cActiveMappings > 0)
10050 iemMemRollback(pVCpu);
10051 /** @todo drop this after we bake this change into RIP advancing. */
10052 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
10053 }
10054
10055 /*
10056 * Return value fiddling, statistics and sanity assertions.
10057 */
10058 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10059
10060 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10061 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10062 return rcStrict;
10063}
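/*
 * Minimal standalone model (plain C, no VBox types; purely illustrative) of the
 * interrupt-shadow handling in iemExecOneInner above: when the first instruction completes
 * successfully and leaves the CPU in an interrupt shadow (CLI, POP SS, MOV SS), one more
 * instruction is executed before returning to the caller.
 */
#include <stdbool.h>

typedef struct MINICPU
{
    bool fInInterruptShadow;    /* stands in for CPUMIsInInterruptShadow() */
} MINICPU;

static int execOneWithInhibitSketch(MINICPU *pCpu, int (*pfnExecOne)(MINICPU *))
{
    int rc = pfnExecOne(pCpu);                  /* first instruction */
    if (rc == 0 && pCpu->fInInterruptShadow)
    {
        rc = pfnExecOne(pCpu);                  /* the shadowed follow-up instruction */
        pCpu->fInInterruptShadow = false;       /* the shadow only covers one instruction */
    }
    return rc;
}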
10064
10065
10066/**
10067 * Execute one instruction.
10068 *
10069 * @return Strict VBox status code.
10070 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10071 */
10072VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
10073{
10074     AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
10075#ifdef LOG_ENABLED
10076 iemLogCurInstr(pVCpu, true, "IEMExecOne");
10077#endif
10078
10079 /*
10080 * Do the decoding and emulation.
10081 */
10082 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10083 if (rcStrict == VINF_SUCCESS)
10084 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
10085 else if (pVCpu->iem.s.cActiveMappings > 0)
10086 iemMemRollback(pVCpu);
10087
10088 if (rcStrict != VINF_SUCCESS)
10089 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10090 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10091 return rcStrict;
10092}
10093
10094
10095VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10096{
10097 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10098 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10099 if (rcStrict == VINF_SUCCESS)
10100 {
10101 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
10102 if (pcbWritten)
10103 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10104 }
10105 else if (pVCpu->iem.s.cActiveMappings > 0)
10106 iemMemRollback(pVCpu);
10107
10108 return rcStrict;
10109}
10110
10111
10112VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10113 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10114{
10115 VBOXSTRICTRC rcStrict;
10116 if ( cbOpcodeBytes
10117 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10118 {
10119 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
10120#ifdef IEM_WITH_CODE_TLB
10121 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10122 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10123 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10124 pVCpu->iem.s.offCurInstrStart = 0;
10125 pVCpu->iem.s.offInstrNextByte = 0;
10126 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
10127#else
10128 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10129 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10130#endif
10131 rcStrict = VINF_SUCCESS;
10132 }
10133 else
10134 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10135 if (rcStrict == VINF_SUCCESS)
10136 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
10137 else if (pVCpu->iem.s.cActiveMappings > 0)
10138 iemMemRollback(pVCpu);
10139
10140 return rcStrict;
10141}
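/*
 * Hedged usage sketch for IEMExecOneWithPrefetchedByPC above (VBox headers/types such as
 * PVMCPUCC and VBOXSTRICTRC are assumed; the wrapper name is hypothetical): a caller that
 * already has the opcode bytes hands them in together with the guest RIP they were fetched
 * from; if RIP no longer matches, IEM falls back to its normal opcode prefetching, as seen
 * in the function body above.
 */
static VBOXSTRICTRC emulateWithPrefetchedBytesSketch(PVMCPUCC pVCpu, uint64_t GCPtrInstr,
                                                     const uint8_t *pabOpcodes, size_t cbOpcodes)
{
    return IEMExecOneWithPrefetchedByPC(pVCpu, GCPtrInstr, pabOpcodes, cbOpcodes);
}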
10142
10143
10144VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10145{
10146 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10147 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
10148 if (rcStrict == VINF_SUCCESS)
10149 {
10150 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
10151 if (pcbWritten)
10152 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10153 }
10154 else if (pVCpu->iem.s.cActiveMappings > 0)
10155 iemMemRollback(pVCpu);
10156
10157 return rcStrict;
10158}
10159
10160
10161VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10162 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10163{
10164 VBOXSTRICTRC rcStrict;
10165 if ( cbOpcodeBytes
10166 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10167 {
10168 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
10169#ifdef IEM_WITH_CODE_TLB
10170 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10171 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10172 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10173 pVCpu->iem.s.offCurInstrStart = 0;
10174 pVCpu->iem.s.offInstrNextByte = 0;
10175 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
10176#else
10177 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10178 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10179#endif
10180 rcStrict = VINF_SUCCESS;
10181 }
10182 else
10183 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
10184 if (rcStrict == VINF_SUCCESS)
10185 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
10186 else if (pVCpu->iem.s.cActiveMappings > 0)
10187 iemMemRollback(pVCpu);
10188
10189 return rcStrict;
10190}
10191
10192
10193/**
10194 * For handling split cacheline lock operations when the host has split-lock
10195 * detection enabled.
10196 *
10197 * This will cause the interpreter to disregard the lock prefix and implicit
10198 * locking (xchg).
10199 *
10200 * @returns Strict VBox status code.
10201 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10202 */
10203VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
10204{
10205 /*
10206 * Do the decoding and emulation.
10207 */
10208 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
10209 if (rcStrict == VINF_SUCCESS)
10210 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
10211 else if (pVCpu->iem.s.cActiveMappings > 0)
10212 iemMemRollback(pVCpu);
10213
10214 if (rcStrict != VINF_SUCCESS)
10215 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10216 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10217 return rcStrict;
10218}
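/*
 * Hedged usage sketch (VBox headers assumed; the trigger flag and wrapper name are
 * hypothetical): when the host's split-lock detection has objected to a guest locked
 * access, a caller could retry the instruction via IEMExecOneIgnoreLock, which per the
 * comment above makes IEM disregard the LOCK prefix and implicit XCHG locking.
 */
static VBOXSTRICTRC retryWithoutLockSketch(PVMCPUCC pVCpu, bool fHostSplitLockDetected)
{
    if (fHostSplitLockDetected)
        return IEMExecOneIgnoreLock(pVCpu);
    return IEMExecOne(pVCpu);
}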
10219
10220
10221/**
10222 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
10223 * inject a pending TRPM trap.
10224 */
10225VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
10226{
10227 Assert(TRPMHasTrap(pVCpu));
10228
10229 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
10230 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
10231 {
10232 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
10233#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10234 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
10235 if (fIntrEnabled)
10236 {
10237 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
10238 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10239 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
10240 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
10241 else
10242 {
10243 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
10244 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
10245 }
10246 }
10247#else
10248 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10249#endif
10250 if (fIntrEnabled)
10251 {
10252 uint8_t u8TrapNo;
10253 TRPMEVENT enmType;
10254 uint32_t uErrCode;
10255 RTGCPTR uCr2;
10256 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
10257 AssertRC(rc2);
10258 Assert(enmType == TRPM_HARDWARE_INT);
10259 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
10260
10261 TRPMResetTrap(pVCpu);
10262
10263#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10264 /* Injecting an event may cause a VM-exit. */
10265 if ( rcStrict != VINF_SUCCESS
10266 && rcStrict != VINF_IEM_RAISED_XCPT)
10267 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
10268#else
10269 NOREF(rcStrict);
10270#endif
10271 }
10272 }
10273
10274 return VINF_SUCCESS;
10275}
10276
10277
10278VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
10279{
10280 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
10281 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
10282 Assert(cMaxInstructions > 0);
10283
10284 /*
10285 * See if there is an interrupt pending in TRPM, inject it if we can.
10286 */
10287 /** @todo What if we are injecting an exception and not an interrupt? Is that
10288 * possible here? For now we assert it is indeed only an interrupt. */
10289 if (!TRPMHasTrap(pVCpu))
10290 { /* likely */ }
10291 else
10292 {
10293 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
10294 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10295 { /*likely */ }
10296 else
10297 return rcStrict;
10298 }
10299
10300 /*
10301 * Initial decoder init w/ prefetch, then setup setjmp.
10302 */
10303 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10304 if (rcStrict == VINF_SUCCESS)
10305 {
10306#ifdef IEM_WITH_SETJMP
10307 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
10308 IEM_TRY_SETJMP(pVCpu, rcStrict)
10309#endif
10310 {
10311 /*
10312              * The run loop. We limit ourselves to the caller-specified number of instructions (cMaxInstructions).
10313 */
10314 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
10315 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10316 for (;;)
10317 {
10318 /*
10319 * Log the state.
10320 */
10321#ifdef LOG_ENABLED
10322 iemLogCurInstr(pVCpu, true, "IEMExecLots");
10323#endif
10324
10325 /*
10326 * Do the decoding and emulation.
10327 */
10328 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10329 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10330#ifdef VBOX_STRICT
10331 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
10332#endif
10333 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10334 {
10335 Assert(pVCpu->iem.s.cActiveMappings == 0);
10336 pVCpu->iem.s.cInstructions++;
10337
10338#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10339 /* Perform any VMX nested-guest instruction boundary actions. */
10340 uint64_t fCpu = pVCpu->fLocalForcedActions;
10341 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10342 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10343 { /* likely */ }
10344 else
10345 {
10346 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10347 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10348 fCpu = pVCpu->fLocalForcedActions;
10349 else
10350 {
10351 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10352 break;
10353 }
10354 }
10355#endif
10356 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10357 {
10358#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10359 uint64_t fCpu = pVCpu->fLocalForcedActions;
10360#endif
10361 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10362 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10363 | VMCPU_FF_TLB_FLUSH
10364 | VMCPU_FF_UNHALT );
10365
10366 if (RT_LIKELY( ( !fCpu
10367 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10368 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
10369 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
10370 {
10371 if (--cMaxInstructionsGccStupidity > 0)
10372 {
10373                         /* Poll timers every now and then according to the caller's specs. */
10374 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
10375 || !TMTimerPollBool(pVM, pVCpu))
10376 {
10377 Assert(pVCpu->iem.s.cActiveMappings == 0);
10378 iemReInitDecoder(pVCpu);
10379 continue;
10380 }
10381 }
10382 }
10383 }
10384 Assert(pVCpu->iem.s.cActiveMappings == 0);
10385 }
10386 else if (pVCpu->iem.s.cActiveMappings > 0)
10387 iemMemRollback(pVCpu);
10388 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10389 break;
10390 }
10391 }
10392#ifdef IEM_WITH_SETJMP
10393 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10394 {
10395 if (pVCpu->iem.s.cActiveMappings > 0)
10396 iemMemRollback(pVCpu);
10397# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10398 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10399# endif
10400 pVCpu->iem.s.cLongJumps++;
10401 }
10402 IEM_CATCH_LONGJMP_END(pVCpu);
10403#endif
10404
10405 /*
10406 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10407 */
10408 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10409 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10410 }
10411 else
10412 {
10413 if (pVCpu->iem.s.cActiveMappings > 0)
10414 iemMemRollback(pVCpu);
10415
10416#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10417 /*
10418 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10419 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10420 */
10421 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10422#endif
10423 }
10424
10425 /*
10426 * Maybe re-enter raw-mode and log.
10427 */
10428 if (rcStrict != VINF_SUCCESS)
10429 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10430 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10431 if (pcInstructions)
10432 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
10433 return rcStrict;
10434}
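/*
 * Hedged usage sketch for IEMExecLots above (VBox headers assumed; the counts are
 * hypothetical): cPollRate is used as a mask on the remaining-instructions counter, so the
 * assertion at the top of the function requires cPollRate + 1 to be a power of two, e.g.
 * 511 to poll the timers roughly every 512 instructions.
 */
static VBOXSTRICTRC runInstructionBurstSketch(PVMCPUCC pVCpu)
{
    uint32_t cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/,
                                        511 /*cPollRate = 2^9 - 1*/, &cInstructions);
    NOREF(cInstructions); /* a real caller would feed this into its statistics */
    return rcStrict;
}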
10435
10436
10437/**
10438 * Interface used by EMExecuteExec, does exit statistics and limits.
10439 *
10440 * @returns Strict VBox status code.
10441 * @param pVCpu The cross context virtual CPU structure.
10442 * @param fWillExit To be defined.
10443 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10444 * @param cMaxInstructions Maximum number of instructions to execute.
10445 * @param cMaxInstructionsWithoutExits
10446 * The max number of instructions without exits.
10447 * @param pStats Where to return statistics.
10448 */
10449VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10450 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10451{
10452 NOREF(fWillExit); /** @todo define flexible exit crits */
10453
10454 /*
10455 * Initialize return stats.
10456 */
10457 pStats->cInstructions = 0;
10458 pStats->cExits = 0;
10459 pStats->cMaxExitDistance = 0;
10460 pStats->cReserved = 0;
10461
10462 /*
10463 * Initial decoder init w/ prefetch, then setup setjmp.
10464 */
10465 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10466 if (rcStrict == VINF_SUCCESS)
10467 {
10468#ifdef IEM_WITH_SETJMP
10469 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
10470 IEM_TRY_SETJMP(pVCpu, rcStrict)
10471#endif
10472 {
10473#ifdef IN_RING0
10474 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10475#endif
10476 uint32_t cInstructionSinceLastExit = 0;
10477
10478 /*
10479 * The run loop. We limit ourselves to 4096 instructions right now.
10480 */
10481 PVM pVM = pVCpu->CTX_SUFF(pVM);
10482 for (;;)
10483 {
10484 /*
10485 * Log the state.
10486 */
10487#ifdef LOG_ENABLED
10488 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10489#endif
10490
10491 /*
10492 * Do the decoding and emulation.
10493 */
10494 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10495
10496 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10497 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10498
10499 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10500 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10501 {
10502 pStats->cExits += 1;
10503 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10504 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10505 cInstructionSinceLastExit = 0;
10506 }
10507
10508 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10509 {
10510 Assert(pVCpu->iem.s.cActiveMappings == 0);
10511 pVCpu->iem.s.cInstructions++;
10512 pStats->cInstructions++;
10513 cInstructionSinceLastExit++;
10514
10515#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10516 /* Perform any VMX nested-guest instruction boundary actions. */
10517 uint64_t fCpu = pVCpu->fLocalForcedActions;
10518 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10519 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10520 { /* likely */ }
10521 else
10522 {
10523 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10524 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10525 fCpu = pVCpu->fLocalForcedActions;
10526 else
10527 {
10528 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10529 break;
10530 }
10531 }
10532#endif
10533 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10534 {
10535#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10536 uint64_t fCpu = pVCpu->fLocalForcedActions;
10537#endif
10538 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10539 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10540 | VMCPU_FF_TLB_FLUSH
10541 | VMCPU_FF_UNHALT );
10542 if (RT_LIKELY( ( ( !fCpu
10543 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10544 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10545 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10546 || pStats->cInstructions < cMinInstructions))
10547 {
10548 if (pStats->cInstructions < cMaxInstructions)
10549 {
10550 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10551 {
10552#ifdef IN_RING0
10553 if ( !fCheckPreemptionPending
10554 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10555#endif
10556 {
10557 Assert(pVCpu->iem.s.cActiveMappings == 0);
10558 iemReInitDecoder(pVCpu);
10559 continue;
10560 }
10561#ifdef IN_RING0
10562 rcStrict = VINF_EM_RAW_INTERRUPT;
10563 break;
10564#endif
10565 }
10566 }
10567 }
10568 Assert(!(fCpu & VMCPU_FF_IEM));
10569 }
10570 Assert(pVCpu->iem.s.cActiveMappings == 0);
10571 }
10572 else if (pVCpu->iem.s.cActiveMappings > 0)
10573 iemMemRollback(pVCpu);
10574 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10575 break;
10576 }
10577 }
10578#ifdef IEM_WITH_SETJMP
10579 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10580 {
10581 if (pVCpu->iem.s.cActiveMappings > 0)
10582 iemMemRollback(pVCpu);
10583 pVCpu->iem.s.cLongJumps++;
10584 }
10585 IEM_CATCH_LONGJMP_END(pVCpu);
10586#endif
10587
10588 /*
10589 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10590 */
10591 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10592 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10593 }
10594 else
10595 {
10596 if (pVCpu->iem.s.cActiveMappings > 0)
10597 iemMemRollback(pVCpu);
10598
10599#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10600 /*
10601 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10602 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10603 */
10604 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10605#endif
10606 }
10607
10608 /*
10609 * Maybe re-enter raw-mode and log.
10610 */
10611 if (rcStrict != VINF_SUCCESS)
10612 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10613 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10614 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10615 return rcStrict;
10616}
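
/*
 * Illustrative sketch, not part of this file: a hypothetical caller (the helper
 * name and the limit values below are assumptions) driving IEMExecForExits and
 * logging the returned statistics.  fWillExit is passed as 0 since it is
 * currently unused (see the @todo in the function above).
 */
#if 0 /* example only */
static VBOXSTRICTRC emExampleRunIemForExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 1 /*cMinInstructions*/,
                                            4096 /*cMaxInstructions*/, 512 /*cMaxInstructionsWithoutExits*/,
                                            &Stats);
    Log(("emExampleRunIemForExits: ins=%u exits=%u maxdist=%u rcStrict=%Rrc\n",
         Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif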
10617
10618
10619/**
10620 * Injects a trap, fault, abort, software interrupt or external interrupt.
10621 *
10622 * The parameter list matches TRPMQueryTrapAll pretty closely.
10623 *
10624 * @returns Strict VBox status code.
10625 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10626 * @param u8TrapNo The trap number.
10627 * @param enmType What type is it (trap/fault/abort), software
10628 * interrupt or hardware interrupt.
10629 * @param uErrCode The error code if applicable.
10630 * @param uCr2 The CR2 value if applicable.
10631 * @param cbInstr The instruction length (only relevant for
10632 * software interrupts).
10633 */
10634VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10635 uint8_t cbInstr)
10636{
10637 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
10638#ifdef DBGFTRACE_ENABLED
10639 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10640 u8TrapNo, enmType, uErrCode, uCr2);
10641#endif
10642
10643 uint32_t fFlags;
10644 switch (enmType)
10645 {
10646 case TRPM_HARDWARE_INT:
10647 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10648 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10649 uErrCode = uCr2 = 0;
10650 break;
10651
10652 case TRPM_SOFTWARE_INT:
10653 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10654 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10655 uErrCode = uCr2 = 0;
10656 break;
10657
10658 case TRPM_TRAP:
10659 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10660 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10661 if (u8TrapNo == X86_XCPT_PF)
10662 fFlags |= IEM_XCPT_FLAGS_CR2;
10663 switch (u8TrapNo)
10664 {
10665 case X86_XCPT_DF:
10666 case X86_XCPT_TS:
10667 case X86_XCPT_NP:
10668 case X86_XCPT_SS:
10669 case X86_XCPT_PF:
10670 case X86_XCPT_AC:
10671 case X86_XCPT_GP:
10672 fFlags |= IEM_XCPT_FLAGS_ERR;
10673 break;
10674 }
10675 break;
10676
10677 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10678 }
10679
10680 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10681
10682 if (pVCpu->iem.s.cActiveMappings > 0)
10683 iemMemRollback(pVCpu);
10684
10685 return rcStrict;
10686}
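
/*
 * Illustrative sketch, not part of this file: injecting a write page fault via
 * IEMInjectTrap.  The helper name is made up; for TRPM_TRAP with X86_XCPT_PF
 * both the error code and CR2 are consumed, while cbInstr only matters for
 * software interrupts and can be 0 here.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleInjectWritePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrFault)
{
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
                         X86_TRAP_PF_P | X86_TRAP_PF_RW /*uErrCode*/,
                         GCPtrFault /*uCr2*/, 0 /*cbInstr*/);
}
#endif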
10687
10688
10689/**
10690 * Injects the active TRPM event.
10691 *
10692 * @returns Strict VBox status code.
10693 * @param pVCpu The cross context virtual CPU structure.
10694 */
10695VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10696{
10697#ifndef IEM_IMPLEMENTS_TASKSWITCH
10698 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10699#else
10700 uint8_t u8TrapNo;
10701 TRPMEVENT enmType;
10702 uint32_t uErrCode;
10703 RTGCUINTPTR uCr2;
10704 uint8_t cbInstr;
10705 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10706 if (RT_FAILURE(rc))
10707 return rc;
10708
10709 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10710 * ICEBP \#DB injection as a special case. */
10711 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10712#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10713 if (rcStrict == VINF_SVM_VMEXIT)
10714 rcStrict = VINF_SUCCESS;
10715#endif
10716#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10717 if (rcStrict == VINF_VMX_VMEXIT)
10718 rcStrict = VINF_SUCCESS;
10719#endif
10720 /** @todo Are there any other codes that imply the event was successfully
10721 * delivered to the guest? See @bugref{6607}. */
10722 if ( rcStrict == VINF_SUCCESS
10723 || rcStrict == VINF_IEM_RAISED_XCPT)
10724 TRPMResetTrap(pVCpu);
10725
10726 return rcStrict;
10727#endif
10728}
10729
10730
10731VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10732{
10733 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10734 return VERR_NOT_IMPLEMENTED;
10735}
10736
10737
10738VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10739{
10740 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10741 return VERR_NOT_IMPLEMENTED;
10742}
10743
10744
10745/**
10746 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10747 *
10748 * This API ASSUMES that the caller has already verified that the guest code is
10749 * allowed to access the I/O port. (The I/O port is in the DX register in the
10750 * guest state.)
10751 *
10752 * @returns Strict VBox status code.
10753 * @param pVCpu The cross context virtual CPU structure.
10754 * @param cbValue The size of the I/O port access (1, 2, or 4).
10755 * @param enmAddrMode The addressing mode.
10756 * @param fRepPrefix Indicates whether a repeat prefix is used
10757 * (doesn't matter which for this instruction).
10758 * @param cbInstr The instruction length in bytes.
10759 * @param   iEffSeg             The effective segment register.
10760 * @param fIoChecked Whether the access to the I/O port has been
10761 * checked or not. It's typically checked in the
10762 * HM scenario.
10763 */
10764VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10765 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10766{
10767 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10768 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10769
10770 /*
10771 * State init.
10772 */
10773 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10774
10775 /*
10776 * Switch orgy for getting to the right handler.
10777 */
10778 VBOXSTRICTRC rcStrict;
10779 if (fRepPrefix)
10780 {
10781 switch (enmAddrMode)
10782 {
10783 case IEMMODE_16BIT:
10784 switch (cbValue)
10785 {
10786 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10787 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10788 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10789 default:
10790 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10791 }
10792 break;
10793
10794 case IEMMODE_32BIT:
10795 switch (cbValue)
10796 {
10797 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10798 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10799 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10800 default:
10801 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10802 }
10803 break;
10804
10805 case IEMMODE_64BIT:
10806 switch (cbValue)
10807 {
10808 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10809 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10810 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10811 default:
10812 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10813 }
10814 break;
10815
10816 default:
10817 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10818 }
10819 }
10820 else
10821 {
10822 switch (enmAddrMode)
10823 {
10824 case IEMMODE_16BIT:
10825 switch (cbValue)
10826 {
10827 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10828 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10829 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10830 default:
10831 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10832 }
10833 break;
10834
10835 case IEMMODE_32BIT:
10836 switch (cbValue)
10837 {
10838 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10839 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10840 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10841 default:
10842 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10843 }
10844 break;
10845
10846 case IEMMODE_64BIT:
10847 switch (cbValue)
10848 {
10849 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10850 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10851 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10852 default:
10853 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10854 }
10855 break;
10856
10857 default:
10858 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10859 }
10860 }
10861
10862 if (pVCpu->iem.s.cActiveMappings)
10863 iemMemRollback(pVCpu);
10864
10865 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10866}
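
/*
 * Illustrative sketch, not part of this file: how an HM I/O-exit handler that
 * has already validated the port access might emulate a flat REP OUTSB using
 * the interface above.  The helper name is an assumption; cbInstr and the
 * address mode would normally come from the exit/decoding information.
 */
#if 0 /* example only */
static VBOXSTRICTRC hmExampleEmulateRepOutsb(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS /*iEffSeg*/, true /*fIoChecked*/);
}
#endif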
10867
10868
10869/**
10870 * Interface for HM and EM for executing string I/O IN (read) instructions.
10871 *
10872 * This API ASSUMES that the caller has already verified that the guest code is
10873 * allowed to access the I/O port. (The I/O port is in the DX register in the
10874 * guest state.)
10875 *
10876 * @returns Strict VBox status code.
10877 * @param pVCpu The cross context virtual CPU structure.
10878 * @param cbValue The size of the I/O port access (1, 2, or 4).
10879 * @param enmAddrMode The addressing mode.
10880 * @param fRepPrefix Indicates whether a repeat prefix is used
10881 * (doesn't matter which for this instruction).
10882 * @param cbInstr The instruction length in bytes.
10883 * @param fIoChecked Whether the access to the I/O port has been
10884 * checked or not. It's typically checked in the
10885 * HM scenario.
10886 */
10887VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10888 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10889{
10890 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10891
10892 /*
10893 * State init.
10894 */
10895 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10896
10897 /*
10898 * Switch orgy for getting to the right handler.
10899 */
10900 VBOXSTRICTRC rcStrict;
10901 if (fRepPrefix)
10902 {
10903 switch (enmAddrMode)
10904 {
10905 case IEMMODE_16BIT:
10906 switch (cbValue)
10907 {
10908 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10909 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10910 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10911 default:
10912 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10913 }
10914 break;
10915
10916 case IEMMODE_32BIT:
10917 switch (cbValue)
10918 {
10919 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10920 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10921 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10922 default:
10923 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10924 }
10925 break;
10926
10927 case IEMMODE_64BIT:
10928 switch (cbValue)
10929 {
10930 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10931 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10932 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10933 default:
10934 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10935 }
10936 break;
10937
10938 default:
10939 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10940 }
10941 }
10942 else
10943 {
10944 switch (enmAddrMode)
10945 {
10946 case IEMMODE_16BIT:
10947 switch (cbValue)
10948 {
10949 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10950 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10951 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10952 default:
10953 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10954 }
10955 break;
10956
10957 case IEMMODE_32BIT:
10958 switch (cbValue)
10959 {
10960 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10961 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10962 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10963 default:
10964 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10965 }
10966 break;
10967
10968 case IEMMODE_64BIT:
10969 switch (cbValue)
10970 {
10971 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10972 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10973 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10974 default:
10975 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10976 }
10977 break;
10978
10979 default:
10980 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10981 }
10982 }
10983
10984 if ( pVCpu->iem.s.cActiveMappings == 0
10985 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10986 { /* likely */ }
10987 else
10988 {
10989 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10990 iemMemRollback(pVCpu);
10991 }
10992 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10993}
10994
10995
10996/**
10997 * Interface for rawmode to execute an OUT (write) instruction.
10998 *
10999 * @returns Strict VBox status code.
11000 * @param pVCpu The cross context virtual CPU structure.
11001 * @param cbInstr The instruction length in bytes.
11002 * @param   u16Port     The port to write to.
11003 * @param fImm Whether the port is specified using an immediate operand or
11004 * using the implicit DX register.
11005 * @param cbReg The register size.
11006 *
11007 * @remarks In ring-0 not all of the state needs to be synced in.
11008 */
11009VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
11010{
11011 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11012 Assert(cbReg <= 4 && cbReg != 3);
11013
11014 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11015 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
11016 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
11017 Assert(!pVCpu->iem.s.cActiveMappings);
11018 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11019}
11020
11021
11022/**
11023 * Interface for rawmode to execute an IN (read) instruction.
11024 *
11025 * @returns Strict VBox status code.
11026 * @param pVCpu The cross context virtual CPU structure.
11027 * @param cbInstr The instruction length in bytes.
11028 * @param u16Port The port to read.
11029 * @param fImm Whether the port is specified using an immediate operand or
11030 *                      using the implicit DX register.
11031 * @param cbReg The register size.
11032 */
11033VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
11034{
11035 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11036 Assert(cbReg <= 4 && cbReg != 3);
11037
11038 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11039 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
11040 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
11041 Assert(!pVCpu->iem.s.cActiveMappings);
11042 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11043}
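
/*
 * Illustrative sketch, not part of this file: emulating "out dx, al" (opcode
 * 0xEE) followed by "in eax, dx" (opcode 0xED) via the two decoded interfaces
 * above.  The helper name and the resolved port value are assumptions; a real
 * caller gets the port from the guest DX / its exit information.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleDecodedPortIo(PVMCPUCC pVCpu, uint16_t u16ResolvedPort)
{
    VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, u16ResolvedPort,
                                              false /*fImm*/, 1 /*cbReg*/);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = IEMExecDecodedIn(pVCpu, 1 /*cbInstr*/, u16ResolvedPort,
                                    false /*fImm*/, 4 /*cbReg*/);
    return rcStrict;
}
#endif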
11044
11045
11046/**
11047 * Interface for HM and EM to write to a CRx register.
11048 *
11049 * @returns Strict VBox status code.
11050 * @param pVCpu The cross context virtual CPU structure.
11051 * @param cbInstr The instruction length in bytes.
11052 * @param iCrReg The control register number (destination).
11053 * @param iGReg The general purpose register number (source).
11054 *
11055 * @remarks In ring-0 not all of the state needs to be synced in.
11056 */
11057VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11058{
11059 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11060 Assert(iCrReg < 16);
11061 Assert(iGReg < 16);
11062
11063 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11064 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11065 Assert(!pVCpu->iem.s.cActiveMappings);
11066 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11067}
11068
11069
11070/**
11071 * Interface for HM and EM to read from a CRx register.
11072 *
11073 * @returns Strict VBox status code.
11074 * @param pVCpu The cross context virtual CPU structure.
11075 * @param cbInstr The instruction length in bytes.
11076 * @param iGReg The general purpose register number (destination).
11077 * @param iCrReg The control register number (source).
11078 *
11079 * @remarks In ring-0 not all of the state needs to be synced in.
11080 */
11081VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11082{
11083 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11084 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
11085 | CPUMCTX_EXTRN_APIC_TPR);
11086 Assert(iCrReg < 16);
11087 Assert(iGReg < 16);
11088
11089 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11090 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11091 Assert(!pVCpu->iem.s.cActiveMappings);
11092 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11093}
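
/*
 * Illustrative sketch, not part of this file: note the argument order of the
 * two interfaces above (the destination register always comes first).  Writing
 * RAX into CR4 and reading CR0 back into RCX would look roughly like this; the
 * helper name is an assumption and the instruction lengths assume the plain
 * 0F 22 /r and 0F 20 /r encodings.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleMovCrx(PVMCPUCC pVCpu)
{
    /* mov cr4, rax: destination iCrReg=4, source iGReg=RAX. */
    VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 4 /*iCrReg*/, X86_GREG_xAX);
    if (rcStrict == VINF_SUCCESS)
        /* mov rcx, cr0: destination iGReg=RCX, source iCrReg=0. */
        rcStrict = IEMExecDecodedMovCRxRead(pVCpu, 3 /*cbInstr*/, X86_GREG_xCX, 0 /*iCrReg*/);
    return rcStrict;
}
#endif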
11094
11095
11096/**
11097 * Interface for HM and EM to write to a DRx register.
11098 *
11099 * @returns Strict VBox status code.
11100 * @param pVCpu The cross context virtual CPU structure.
11101 * @param cbInstr The instruction length in bytes.
11102 * @param iDrReg The debug register number (destination).
11103 * @param iGReg The general purpose register number (source).
11104 *
11105 * @remarks In ring-0 not all of the state needs to be synced in.
11106 */
11107VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
11108{
11109 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11110 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11111 Assert(iDrReg < 8);
11112 Assert(iGReg < 16);
11113
11114 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11115 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
11116 Assert(!pVCpu->iem.s.cActiveMappings);
11117 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11118}
11119
11120
11121/**
11122 * Interface for HM and EM to read from a DRx register.
11123 *
11124 * @returns Strict VBox status code.
11125 * @param pVCpu The cross context virtual CPU structure.
11126 * @param cbInstr The instruction length in bytes.
11127 * @param iGReg The general purpose register number (destination).
11128 * @param iDrReg The debug register number (source).
11129 *
11130 * @remarks In ring-0 not all of the state needs to be synced in.
11131 */
11132VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
11133{
11134 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11135 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11136 Assert(iDrReg < 8);
11137 Assert(iGReg < 16);
11138
11139 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11140 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
11141 Assert(!pVCpu->iem.s.cActiveMappings);
11142 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11143}
11144
11145
11146/**
11147 * Interface for HM and EM to clear the CR0[TS] bit.
11148 *
11149 * @returns Strict VBox status code.
11150 * @param pVCpu The cross context virtual CPU structure.
11151 * @param cbInstr The instruction length in bytes.
11152 *
11153 * @remarks In ring-0 not all of the state needs to be synced in.
11154 */
11155VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
11156{
11157 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11158
11159 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11160 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11161 Assert(!pVCpu->iem.s.cActiveMappings);
11162 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11163}
11164
11165
11166/**
11167 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11168 *
11169 * @returns Strict VBox status code.
11170 * @param pVCpu The cross context virtual CPU structure.
11171 * @param cbInstr The instruction length in bytes.
11172 * @param uValue The value to load into CR0.
11173 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
11174 * memory operand. Otherwise pass NIL_RTGCPTR.
11175 *
11176 * @remarks In ring-0 not all of the state needs to be synced in.
11177 */
11178VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
11179{
11180 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11181
11182 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11183 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
11184 Assert(!pVCpu->iem.s.cActiveMappings);
11185 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11186}
11187
11188
11189/**
11190 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11191 *
11192 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11193 *
11194 * @returns Strict VBox status code.
11195 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11196 * @param cbInstr The instruction length in bytes.
11197 * @remarks In ring-0 not all of the state needs to be synced in.
11198 * @thread EMT(pVCpu)
11199 */
11200VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
11201{
11202 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11203
11204 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11205 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11206 Assert(!pVCpu->iem.s.cActiveMappings);
11207 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11208}
11209
11210
11211/**
11212 * Interface for HM and EM to emulate the WBINVD instruction.
11213 *
11214 * @returns Strict VBox status code.
11215 * @param pVCpu The cross context virtual CPU structure.
11216 * @param cbInstr The instruction length in bytes.
11217 *
11218 * @remarks In ring-0 not all of the state needs to be synced in.
11219 */
11220VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11221{
11222 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11223
11224 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11225 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
11226 Assert(!pVCpu->iem.s.cActiveMappings);
11227 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11228}
11229
11230
11231/**
11232 * Interface for HM and EM to emulate the INVD instruction.
11233 *
11234 * @returns Strict VBox status code.
11235 * @param pVCpu The cross context virtual CPU structure.
11236 * @param cbInstr The instruction length in bytes.
11237 *
11238 * @remarks In ring-0 not all of the state needs to be synced in.
11239 */
11240VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11241{
11242 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11243
11244 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11245 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
11246 Assert(!pVCpu->iem.s.cActiveMappings);
11247 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11248}
11249
11250
11251/**
11252 * Interface for HM and EM to emulate the INVLPG instruction.
11253 *
11254 * @returns Strict VBox status code.
11255 * @retval VINF_PGM_SYNC_CR3
11256 *
11257 * @param pVCpu The cross context virtual CPU structure.
11258 * @param cbInstr The instruction length in bytes.
11259 * @param GCPtrPage The effective address of the page to invalidate.
11260 *
11261 * @remarks In ring-0 not all of the state needs to be synced in.
11262 */
11263VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
11264{
11265 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11266
11267 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11268 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
11269 Assert(!pVCpu->iem.s.cActiveMappings);
11270 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11271}
11272
11273
11274/**
11275 * Interface for HM and EM to emulate the INVPCID instruction.
11276 *
11277 * @returns Strict VBox status code.
11278 * @retval VINF_PGM_SYNC_CR3
11279 *
11280 * @param pVCpu The cross context virtual CPU structure.
11281 * @param cbInstr The instruction length in bytes.
11282 * @param iEffSeg The effective segment register.
11283 * @param GCPtrDesc The effective address of the INVPCID descriptor.
11284 * @param uType The invalidation type.
11285 *
11286 * @remarks In ring-0 not all of the state needs to be synced in.
11287 */
11288VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
11289 uint64_t uType)
11290{
11291 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
11292
11293 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11294 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
11295 Assert(!pVCpu->iem.s.cActiveMappings);
11296 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11297}
11298
11299
11300/**
11301 * Interface for HM and EM to emulate the CPUID instruction.
11302 *
11303 * @returns Strict VBox status code.
11304 *
11305 * @param pVCpu The cross context virtual CPU structure.
11306 * @param cbInstr The instruction length in bytes.
11307 *
11308 * @remarks Not all of the state needs to be synced in; the usual plus RAX and RCX.
11309 */
11310VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
11311{
11312 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11313 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
11314
11315 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11316 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
11317 Assert(!pVCpu->iem.s.cActiveMappings);
11318 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11319}
11320
11321
11322/**
11323 * Interface for HM and EM to emulate the RDPMC instruction.
11324 *
11325 * @returns Strict VBox status code.
11326 *
11327 * @param pVCpu The cross context virtual CPU structure.
11328 * @param cbInstr The instruction length in bytes.
11329 *
11330 * @remarks Not all of the state needs to be synced in.
11331 */
11332VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
11333{
11334 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11335 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11336
11337 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11338 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
11339 Assert(!pVCpu->iem.s.cActiveMappings);
11340 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11341}
11342
11343
11344/**
11345 * Interface for HM and EM to emulate the RDTSC instruction.
11346 *
11347 * @returns Strict VBox status code.
11348 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11349 *
11350 * @param pVCpu The cross context virtual CPU structure.
11351 * @param cbInstr The instruction length in bytes.
11352 *
11353 * @remarks Not all of the state needs to be synced in.
11354 */
11355VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
11356{
11357 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11358 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11359
11360 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11361 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
11362 Assert(!pVCpu->iem.s.cActiveMappings);
11363 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11364}
11365
11366
11367/**
11368 * Interface for HM and EM to emulate the RDTSCP instruction.
11369 *
11370 * @returns Strict VBox status code.
11371 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11372 *
11373 * @param pVCpu The cross context virtual CPU structure.
11374 * @param cbInstr The instruction length in bytes.
11375 *
11376 * @remarks Not all of the state needs to be synced in.  Recommended
11377 *          to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
11378 */
11379VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
11380{
11381 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11382 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11383
11384 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11385 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11386 Assert(!pVCpu->iem.s.cActiveMappings);
11387 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11388}
11389
11390
11391/**
11392 * Interface for HM and EM to emulate the RDMSR instruction.
11393 *
11394 * @returns Strict VBox status code.
11395 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11396 *
11397 * @param pVCpu The cross context virtual CPU structure.
11398 * @param cbInstr The instruction length in bytes.
11399 *
11400 * @remarks Not all of the state needs to be synced in. Requires RCX and
11401 * (currently) all MSRs.
11402 */
11403VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11404{
11405 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11406 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11407
11408 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11409 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11410 Assert(!pVCpu->iem.s.cActiveMappings);
11411 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11412}
11413
11414
11415/**
11416 * Interface for HM and EM to emulate the WRMSR instruction.
11417 *
11418 * @returns Strict VBox status code.
11419 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11420 *
11421 * @param pVCpu The cross context virtual CPU structure.
11422 * @param cbInstr The instruction length in bytes.
11423 *
11424 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11425 * and (currently) all MSRs.
11426 */
11427VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11428{
11429 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11430 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11431 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11432
11433 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11434 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11435 Assert(!pVCpu->iem.s.cActiveMappings);
11436 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11437}
11438
11439
11440/**
11441 * Interface for HM and EM to emulate the MONITOR instruction.
11442 *
11443 * @returns Strict VBox status code.
11444 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11445 *
11446 * @param pVCpu The cross context virtual CPU structure.
11447 * @param cbInstr The instruction length in bytes.
11448 *
11449 * @remarks Not all of the state needs to be synced in.
11450 * @remarks ASSUMES the default segment of DS and no segment override prefixes
11451 * are used.
11452 */
11453VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11454{
11455 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11456 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11457
11458 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11459 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11460 Assert(!pVCpu->iem.s.cActiveMappings);
11461 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11462}
11463
11464
11465/**
11466 * Interface for HM and EM to emulate the MWAIT instruction.
11467 *
11468 * @returns Strict VBox status code.
11469 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11470 *
11471 * @param pVCpu The cross context virtual CPU structure.
11472 * @param cbInstr The instruction length in bytes.
11473 *
11474 * @remarks Not all of the state needs to be synced in.
11475 */
11476VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11477{
11478 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11479 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11480
11481 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11482 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11483 Assert(!pVCpu->iem.s.cActiveMappings);
11484 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11485}
11486
11487
11488/**
11489 * Interface for HM and EM to emulate the HLT instruction.
11490 *
11491 * @returns Strict VBox status code.
11492 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11493 *
11494 * @param pVCpu The cross context virtual CPU structure.
11495 * @param cbInstr The instruction length in bytes.
11496 *
11497 * @remarks Not all of the state needs to be synced in.
11498 */
11499VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11500{
11501 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11502
11503 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11504 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11505 Assert(!pVCpu->iem.s.cActiveMappings);
11506 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11507}
11508
11509
11510/**
11511 * Checks if IEM is in the process of delivering an event (interrupt or
11512 * exception).
11513 *
11514 * @returns true if we're in the process of raising an interrupt or exception,
11515 * false otherwise.
11516 * @param pVCpu The cross context virtual CPU structure.
11517 * @param puVector Where to store the vector associated with the
11518 * currently delivered event, optional.
11519 * @param   pfFlags         Where to store the event delivery flags (see
11520 * IEM_XCPT_FLAGS_XXX), optional.
11521 * @param puErr Where to store the error code associated with the
11522 * event, optional.
11523 * @param puCr2 Where to store the CR2 associated with the event,
11524 * optional.
11525 * @remarks The caller should check the flags to determine if the error code and
11526 * CR2 are valid for the event.
11527 */
11528VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11529{
11530 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11531 if (fRaisingXcpt)
11532 {
11533 if (puVector)
11534 *puVector = pVCpu->iem.s.uCurXcpt;
11535 if (pfFlags)
11536 *pfFlags = pVCpu->iem.s.fCurXcpt;
11537 if (puErr)
11538 *puErr = pVCpu->iem.s.uCurXcptErr;
11539 if (puCr2)
11540 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11541 }
11542 return fRaisingXcpt;
11543}
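
/*
 * Illustrative sketch, not part of this file: a caller (e.g. nested-virt or
 * debug logging code) checking whether IEM is currently delivering an event.
 * The helper name is an assumption; uErr and uCr2 are only meaningful when the
 * corresponding IEM_XCPT_FLAGS_XXX bits are set in fFlags.
 */
#if 0 /* example only */
static void iemExampleLogCurrentXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("iemExampleLogCurrentXcpt: vector=%#x fFlags=%#x uErr=%#x uCr2=%#RX64\n",
             uVector, fFlags, uErr, uCr2));
}
#endif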
11544
11545#ifdef IN_RING3
11546
11547/**
11548 * Handles the unlikely and probably fatal merge cases.
11549 *
11550 * @returns Merged status code.
11551 * @param rcStrict Current EM status code.
11552 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11553 * with @a rcStrict.
11554 * @param iMemMap The memory mapping index. For error reporting only.
11555 * @param pVCpu The cross context virtual CPU structure of the calling
11556 * thread, for error reporting only.
11557 */
11558DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11559 unsigned iMemMap, PVMCPUCC pVCpu)
11560{
11561 if (RT_FAILURE_NP(rcStrict))
11562 return rcStrict;
11563
11564 if (RT_FAILURE_NP(rcStrictCommit))
11565 return rcStrictCommit;
11566
11567 if (rcStrict == rcStrictCommit)
11568 return rcStrictCommit;
11569
11570 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11571 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11572 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11573 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11574 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11575 return VERR_IOM_FF_STATUS_IPE;
11576}
11577
11578
11579/**
11580 * Helper for IOMR3ProcessForceFlag.
11581 *
11582 * @returns Merged status code.
11583 * @param rcStrict Current EM status code.
11584 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11585 * with @a rcStrict.
11586 * @param iMemMap The memory mapping index. For error reporting only.
11587 * @param pVCpu The cross context virtual CPU structure of the calling
11588 * thread, for error reporting only.
11589 */
11590DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11591{
11592 /* Simple. */
11593 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11594 return rcStrictCommit;
11595
11596 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11597 return rcStrict;
11598
11599 /* EM scheduling status codes. */
11600 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11601 && rcStrict <= VINF_EM_LAST))
11602 {
11603 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11604 && rcStrictCommit <= VINF_EM_LAST))
11605 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11606 }
11607
11608 /* Unlikely */
11609 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11610}
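
/*
 * Worked examples of the merge rules above (illustrative, not from this file):
 *      iemR3MergeStatus(VINF_SUCCESS,      VINF_EM_RAW_TO_R3, 0, pVCpu) -> VINF_EM_RAW_TO_R3
 *      iemR3MergeStatus(VINF_EM_RAW_TO_R3, VINF_SUCCESS,      0, pVCpu) -> VINF_SUCCESS (we're already in ring-3)
 * When both inputs are EM scheduling codes the numerically smaller, i.e. higher
 * priority, one is returned; failures and other combinations are passed on to
 * iemR3MergeStatusSlow.
 */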
11611
11612
11613/**
11614 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11615 *
11616 * @returns Merge between @a rcStrict and what the commit operation returned.
11617 * @param pVM The cross context VM structure.
11618 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11619 * @param rcStrict The status code returned by ring-0 or raw-mode.
11620 */
11621VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11622{
11623 /*
11624 * Reset the pending commit.
11625 */
11626 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11627 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11628 ("%#x %#x %#x\n",
11629 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11630 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11631
11632 /*
11633 * Commit the pending bounce buffers (usually just one).
11634 */
11635 unsigned cBufs = 0;
11636 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11637 while (iMemMap-- > 0)
11638 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11639 {
11640 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11641 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11642 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11643
11644 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11645 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11646 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11647
11648 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11649 {
11650 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11651 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11652 pbBuf,
11653 cbFirst,
11654 PGMACCESSORIGIN_IEM);
11655 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11656 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11657 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11658 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11659 }
11660
11661 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11662 {
11663 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11664 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11665 pbBuf + cbFirst,
11666 cbSecond,
11667 PGMACCESSORIGIN_IEM);
11668 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11669 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11670 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11671 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11672 }
11673 cBufs++;
11674 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11675 }
11676
11677 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11678 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11679 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11680 pVCpu->iem.s.cActiveMappings = 0;
11681 return rcStrict;
11682}
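
/*
 * Illustrative sketch, not part of this file: how ring-3 force-flag processing
 * might invoke IEMR3ProcessForceFlag when VMCPU_FF_IEM is pending.  The helper
 * name is an assumption.
 */
#if 0 /* example only */
static VBOXSTRICTRC emExampleProcessIemForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif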
11683
11684#endif /* IN_RING3 */
11685