VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@105284

Last change on this file since 105284 was 105284, checked in by vboxsync, 8 months ago

VMM/IEM: Large page TLB optimizations for INVLPG. bugref:10687

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 458.9 KB
 
1/* $Id: IEMAll.cpp 105284 2024-07-12 00:14:36Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered, however this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The \"IEM_MEM\" log group covers most of memory related details logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
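/* Editor's note: a minimal, hypothetical sketch of how the level assignments
 * listed above are meant to be used.  The Log/LogFlow/Log10 macros and the
 * LOG_GROUP_IEM group are the standard VBox logging facilities; the specific
 * call sites below are invented for illustration (compare the real Log10 use
 * in IEMTlbInvalidatePage further down in this file). */
#if 0
LogFlow(("IEMExecOne: enter\n"));                      /* Flow    : basic enter/exit state info. */
Log(("iemRaiseXcptOrInt: vector %#x\n", u8Vector));    /* Level 1 : exceptions and other major events. */
Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));  /* Level 10: TLB activity. */
#endif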
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gcm.h>
134#include <VBox/vmm/gim.h>
135#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
136# include <VBox/vmm/em.h>
137# include <VBox/vmm/hm_svm.h>
138#endif
139#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
140# include <VBox/vmm/hmvmxinline.h>
141#endif
142#include <VBox/vmm/tm.h>
143#include <VBox/vmm/dbgf.h>
144#include <VBox/vmm/dbgftrace.h>
145#include "IEMInternal.h"
146#include <VBox/vmm/vmcc.h>
147#include <VBox/log.h>
148#include <VBox/err.h>
149#include <VBox/param.h>
150#include <VBox/dis.h>
151#include <iprt/asm-math.h>
152#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
153# include <iprt/asm-amd64-x86.h>
154#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
155# include <iprt/asm-arm.h>
156#endif
157#include <iprt/assert.h>
158#include <iprt/string.h>
159#include <iprt/x86.h>
160
161#include "IEMInline.h"
162
163
164/*********************************************************************************************************************************
165* Structures and Typedefs *
166*********************************************************************************************************************************/
167/**
168 * CPU exception classes.
169 */
170typedef enum IEMXCPTCLASS
171{
172 IEMXCPTCLASS_BENIGN,
173 IEMXCPTCLASS_CONTRIBUTORY,
174 IEMXCPTCLASS_PAGE_FAULT,
175 IEMXCPTCLASS_DOUBLE_FAULT
176} IEMXCPTCLASS;
177
178
179/*********************************************************************************************************************************
180* Global Variables *
181*********************************************************************************************************************************/
182#if defined(IEM_LOG_MEMORY_WRITES)
183/** What IEM just wrote. */
184uint8_t g_abIemWrote[256];
185/** How much IEM just wrote. */
186size_t g_cbIemWrote;
187#endif
188
189
190/*********************************************************************************************************************************
191* Internal Functions *
192*********************************************************************************************************************************/
193static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
194 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
195
196
197/**
198 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
199 * path.
200 *
201 * This will also invalidate TLB entries for any pages with active data
202 * breakpoints on them.
203 *
204 * @returns IEM_F_BRK_PENDING_XXX or zero.
205 * @param pVCpu The cross context virtual CPU structure of the
206 * calling thread.
207 *
208 * @note Don't call directly, use iemCalcExecDbgFlags instead.
209 */
210uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
211{
212 uint32_t fExec = 0;
213
214 /*
215 * Helper for invalidating the data TLB for breakpoint addresses.
216 *
217 * This is to make sure any access to the page will always trigger a TLB
218 * load for as long as the breakpoint is enabled.
219 */
220#ifdef IEM_WITH_DATA_TLB
221# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { \
222 RTGCPTR uTagNoRev = (a_uValue); \
223 uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uTagNoRev); \
224 /** @todo do large page accounting */ \
225 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev); \
226 if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)) \
227 pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag = 0; \
228 if (pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)) \
229 pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag = 0; \
230 } while (0)
231#else
232# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { } while (0)
233#endif
234
235 /*
236 * Process guest breakpoints.
237 */
238#define PROCESS_ONE_BP(a_fDr7, a_iBp, a_uValue) do { \
239 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
240 { \
241 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
242 { \
243 case X86_DR7_RW_EO: \
244 fExec |= IEM_F_PENDING_BRK_INSTR; \
245 break; \
246 case X86_DR7_RW_WO: \
247 case X86_DR7_RW_RW: \
248 fExec |= IEM_F_PENDING_BRK_DATA; \
249 INVALID_TLB_ENTRY_FOR_BP(a_uValue); \
250 break; \
251 case X86_DR7_RW_IO: \
252 fExec |= IEM_F_PENDING_BRK_X86_IO; \
253 break; \
254 } \
255 } \
256 } while (0)
257
258 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
259 if (fGstDr7 & X86_DR7_ENABLED_MASK)
260 {
261/** @todo extract more details here to simplify matching later. */
262#ifdef IEM_WITH_DATA_TLB
263 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
264#endif
265 PROCESS_ONE_BP(fGstDr7, 0, pVCpu->cpum.GstCtx.dr[0]);
266 PROCESS_ONE_BP(fGstDr7, 1, pVCpu->cpum.GstCtx.dr[1]);
267 PROCESS_ONE_BP(fGstDr7, 2, pVCpu->cpum.GstCtx.dr[2]);
268 PROCESS_ONE_BP(fGstDr7, 3, pVCpu->cpum.GstCtx.dr[3]);
269 }
270
271 /*
272 * Process hypervisor breakpoints.
273 */
274 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
275 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVM);
276 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
277 {
278/** @todo extract more details here to simplify matching later. */
279 PROCESS_ONE_BP(fHyperDr7, 0, DBGFBpGetDR0(pVM));
280 PROCESS_ONE_BP(fHyperDr7, 1, DBGFBpGetDR1(pVM));
281 PROCESS_ONE_BP(fHyperDr7, 2, DBGFBpGetDR2(pVM));
282 PROCESS_ONE_BP(fHyperDr7, 3, DBGFBpGetDR3(pVM));
283 }
284
285 return fExec;
286}
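/* Editor's note: hedged sketch of how the slow path above relates to the
 * inline wrapper referred to in the @note ("use iemCalcExecDbgFlags instead").
 * The real wrapper lives in IEMInline.h; this only approximates its shape
 * (it also has to consider hypervisor/DBGF breakpoints) and is not a copy. */
#if 0
DECL_FORCE_INLINE(uint32_t) iemCalcExecDbgFlagsSketch(PVMCPUCC pVCpu)
{
    if (RT_LIKELY(!(pVCpu->cpum.GstCtx.dr[7] & X86_DR7_ENABLED_MASK)))  /* common case: no guest breakpoints armed */
        return 0;
    return iemCalcExecDbgFlagsSlow(pVCpu);                              /* otherwise take the slow path above */
}
#endif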
287
288
289/**
290 * Initializes the decoder state.
291 *
292 * iemReInitDecoder is mostly a copy of this function.
293 *
294 * @param pVCpu The cross context virtual CPU structure of the
295 * calling thread.
296 * @param fExecOpts Optional execution flags:
297 * - IEM_F_BYPASS_HANDLERS
298 * - IEM_F_X86_DISREGARD_LOCK
299 */
300DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
301{
302 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
303 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
304 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
305 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
306 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
307 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
308 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
310 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
311 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
312
313 /* Execution state: */
314 uint32_t fExec;
315 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
316
317 /* Decoder state: */
318 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
319 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
320 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
321 {
322 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
323 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
324 }
325 else
326 {
327 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
328 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
329 }
330 pVCpu->iem.s.fPrefixes = 0;
331 pVCpu->iem.s.uRexReg = 0;
332 pVCpu->iem.s.uRexB = 0;
333 pVCpu->iem.s.uRexIndex = 0;
334 pVCpu->iem.s.idxPrefix = 0;
335 pVCpu->iem.s.uVex3rdReg = 0;
336 pVCpu->iem.s.uVexLength = 0;
337 pVCpu->iem.s.fEvexStuff = 0;
338 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
339#ifdef IEM_WITH_CODE_TLB
340 pVCpu->iem.s.pbInstrBuf = NULL;
341 pVCpu->iem.s.offInstrNextByte = 0;
342 pVCpu->iem.s.offCurInstrStart = 0;
343# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
344 pVCpu->iem.s.offOpcode = 0;
345# endif
346# ifdef VBOX_STRICT
347 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
348 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
349 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
350 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
351# endif
352#else
353 pVCpu->iem.s.offOpcode = 0;
354 pVCpu->iem.s.cbOpcode = 0;
355#endif
356 pVCpu->iem.s.offModRm = 0;
357 pVCpu->iem.s.cActiveMappings = 0;
358 pVCpu->iem.s.iNextMapping = 0;
359 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
360
361#ifdef DBGFTRACE_ENABLED
362 switch (IEM_GET_CPU_MODE(pVCpu))
363 {
364 case IEMMODE_64BIT:
365 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
366 break;
367 case IEMMODE_32BIT:
368 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
369 break;
370 case IEMMODE_16BIT:
371 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
372 break;
373 }
374#endif
375}
376
377
378/**
379 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
380 *
381 * This is mostly a copy of iemInitDecoder.
382 *
383 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
384 */
385DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
386{
387 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
388 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
389 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
390 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
391 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
392 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
393 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
394 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
395 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
396
397 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
398 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
399 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
400
401 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
402 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
403 pVCpu->iem.s.enmEffAddrMode = enmMode;
404 if (enmMode != IEMMODE_64BIT)
405 {
406 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
407 pVCpu->iem.s.enmEffOpSize = enmMode;
408 }
409 else
410 {
411 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
412 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
413 }
414 pVCpu->iem.s.fPrefixes = 0;
415 pVCpu->iem.s.uRexReg = 0;
416 pVCpu->iem.s.uRexB = 0;
417 pVCpu->iem.s.uRexIndex = 0;
418 pVCpu->iem.s.idxPrefix = 0;
419 pVCpu->iem.s.uVex3rdReg = 0;
420 pVCpu->iem.s.uVexLength = 0;
421 pVCpu->iem.s.fEvexStuff = 0;
422 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
423#ifdef IEM_WITH_CODE_TLB
424 if (pVCpu->iem.s.pbInstrBuf)
425 {
426 uint64_t off = (enmMode == IEMMODE_64BIT
427 ? pVCpu->cpum.GstCtx.rip
428 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
429 - pVCpu->iem.s.uInstrBufPc;
430 if (off < pVCpu->iem.s.cbInstrBufTotal)
431 {
432 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
433 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
434 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
435 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
436 else
437 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
438 }
439 else
440 {
441 pVCpu->iem.s.pbInstrBuf = NULL;
442 pVCpu->iem.s.offInstrNextByte = 0;
443 pVCpu->iem.s.offCurInstrStart = 0;
444 pVCpu->iem.s.cbInstrBuf = 0;
445 pVCpu->iem.s.cbInstrBufTotal = 0;
446 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
447 }
448 }
449 else
450 {
451 pVCpu->iem.s.offInstrNextByte = 0;
452 pVCpu->iem.s.offCurInstrStart = 0;
453 pVCpu->iem.s.cbInstrBuf = 0;
454 pVCpu->iem.s.cbInstrBufTotal = 0;
455# ifdef VBOX_STRICT
456 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
457# endif
458 }
459# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
460 pVCpu->iem.s.offOpcode = 0;
461# endif
462#else /* !IEM_WITH_CODE_TLB */
463 pVCpu->iem.s.cbOpcode = 0;
464 pVCpu->iem.s.offOpcode = 0;
465#endif /* !IEM_WITH_CODE_TLB */
466 pVCpu->iem.s.offModRm = 0;
467 Assert(pVCpu->iem.s.cActiveMappings == 0);
468 pVCpu->iem.s.iNextMapping = 0;
469 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
470 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
471
472#ifdef DBGFTRACE_ENABLED
473 switch (enmMode)
474 {
475 case IEMMODE_64BIT:
476 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
477 break;
478 case IEMMODE_32BIT:
479 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
480 break;
481 case IEMMODE_16BIT:
482 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
483 break;
484 }
485#endif
486}
487
488
489
490/**
491 * Prefetches opcodes the first time, when starting execution.
492 *
493 * @returns Strict VBox status code.
494 * @param pVCpu The cross context virtual CPU structure of the
495 * calling thread.
496 * @param fExecOpts Optional execution flags:
497 * - IEM_F_BYPASS_HANDLERS
498 * - IEM_F_X86_DISREGARD_LOCK
499 */
500static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
501{
502 iemInitDecoder(pVCpu, fExecOpts);
503
504#ifndef IEM_WITH_CODE_TLB
505 /*
506 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
507 *
508 * First translate CS:rIP to a physical address.
509 *
510 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
511 * all relevant bytes from the first page, as it ASSUMES it's only ever
512 * called for dealing with CS.LIM, page crossing and instructions that
513 * are too long.
514 */
515 uint32_t cbToTryRead;
516 RTGCPTR GCPtrPC;
517 if (IEM_IS_64BIT_CODE(pVCpu))
518 {
519 cbToTryRead = GUEST_PAGE_SIZE;
520 GCPtrPC = pVCpu->cpum.GstCtx.rip;
521 if (IEM_IS_CANONICAL(GCPtrPC))
522 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
523 else
524 return iemRaiseGeneralProtectionFault0(pVCpu);
525 }
526 else
527 {
528 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
529 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
530 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
531 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
532 else
533 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
534 if (cbToTryRead) { /* likely */ }
535 else /* overflowed */
536 {
537 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
538 cbToTryRead = UINT32_MAX;
539 }
540 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
541 Assert(GCPtrPC <= UINT32_MAX);
542 }
543
544 PGMPTWALKFAST WalkFast;
545 int rc = PGMGstQueryPageFast(pVCpu, GCPtrPC,
546 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
547 &WalkFast);
548 if (RT_SUCCESS(rc))
549 Assert(WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED);
550 else
551 {
552 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
553# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
554/** @todo This isn't quite right yet, as PGM_GST_SLAT_NAME_EPT(Walk) doesn't
555 * know about what kind of access we're making! See PGM_GST_NAME(WalkFast). */
556 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
557 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
558# endif
559 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
560 }
561#if 0
562 if ((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
563 else
564 {
565 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
566# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
567/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
568# error completely wrong
569 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
570 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
571# endif
572 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
573 }
574 if (!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
575 else
576 {
577 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
578# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
579/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
580# error completely wrong.
581 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
582 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
583# endif
584 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
585 }
586#else
587 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
588 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
589#endif
590 RTGCPHYS const GCPhys = WalkFast.GCPhys;
591
592 /*
593 * Read the bytes at this address.
594 */
595 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
596 if (cbToTryRead > cbLeftOnPage)
597 cbToTryRead = cbLeftOnPage;
598 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
599 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
600
601 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
602 {
603 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
604 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
605 { /* likely */ }
606 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
607 {
608 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
609 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
610 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
611 }
612 else
613 {
614 Log((RT_SUCCESS(rcStrict)
615 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
616 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
617 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
618 return rcStrict;
619 }
620 }
621 else
622 {
623 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
624 if (RT_SUCCESS(rc))
625 { /* likely */ }
626 else
627 {
628 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
629 GCPtrPC, GCPhys, rc, cbToTryRead));
630 return rc;
631 }
632 }
633 pVCpu->iem.s.cbOpcode = cbToTryRead;
634#endif /* !IEM_WITH_CODE_TLB */
635 return VINF_SUCCESS;
636}
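/* Editor's note: worked example for the 64-bit branch above, with assumed
 * values.  With GUEST_PAGE_SIZE = 0x1000 and a canonical GCPtrPC of
 * 0xffffffff81000f80, the page offset is 0xf80, so cbToTryRead becomes
 * 0x1000 - 0xf80 = 0x80 bytes (later capped to sizeof(abOpcode) and to what
 * is left on the page); a non-canonical RIP instead raises #GP(0). */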
637
638
639#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
640/**
641 * Helper for doing large page accounting at TLB load time.
642 */
643template<bool const a_fGlobal>
644DECL_FORCE_INLINE(void) iemTlbLoadedLargePage(IEMTLB *pTlb, RTGCPTR uTagNoRev, bool f2MbLargePages)
645{
646 if (a_fGlobal)
647 pTlb->cTlbGlobalLargePageCurLoads++;
648 else
649 pTlb->cTlbNonGlobalLargePageCurLoads++;
650
651 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
652 uint32_t const fMask = (f2MbLargePages ? _2M - 1U : _4M - 1U) >> GUEST_PAGE_SHIFT;
653 IEMTLB::LARGEPAGERANGE * const pRange = a_fGlobal
654 ? &pTlb->GlobalLargePageRange
655 : &pTlb->NonGlobalLargePageRange;
656 uTagNoRev &= ~(RTGCPTR)fMask;
657 if (uTagNoRev < pRange->uFirstTag)
658 pRange->uFirstTag = uTagNoRev;
659
660 uTagNoRev |= fMask;
661 if (uTagNoRev > pRange->uLastTag)
662 pRange->uLastTag = uTagNoRev;
663}
664#endif
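/* Editor's note: worked example for the range accounting above, assuming 4K
 * guest pages and 2MB large pages (f2MbLargePages = true).  Then
 * fMask = (_2M - 1) >> GUEST_PAGE_SHIFT = 0x1ff, so a load with
 * uTagNoRev = 0x12345 widens the range to uFirstTag <= 0x12200 and
 * uLastTag >= 0x123ff, i.e. the whole 2MB-aligned tag span that the large
 * page could populate is recorded for the later invalidation scans. */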
665
666
667#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
668/**
669 * Worker for iemTlbInvalidateAll.
670 */
671template<bool a_fGlobal>
672DECL_FORCE_INLINE(void) iemTlbInvalidateOne(IEMTLB *pTlb)
673{
674 if (!a_fGlobal)
675 pTlb->cTlsFlushes++;
676 else
677 pTlb->cTlsGlobalFlushes++;
678
679 pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
680 if (RT_LIKELY(pTlb->uTlbRevision != 0))
681 { /* very likely */ }
682 else
683 {
684 pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
685 pTlb->cTlbRevisionRollovers++;
686 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
687 while (i-- > 0)
688 pTlb->aEntries[i * 2].uTag = 0;
689 }
690
691 pTlb->cTlbNonGlobalLargePageCurLoads = 0;
692 pTlb->NonGlobalLargePageRange.uLastTag = 0;
693 pTlb->NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
694
695 if (a_fGlobal)
696 {
697 pTlb->uTlbRevisionGlobal += IEMTLB_REVISION_INCR;
698 if (RT_LIKELY(pTlb->uTlbRevisionGlobal != 0))
699 { /* very likely */ }
700 else
701 {
702 pTlb->uTlbRevisionGlobal = IEMTLB_REVISION_INCR;
703 pTlb->cTlbRevisionRollovers++;
704 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
705 while (i-- > 0)
706 pTlb->aEntries[i * 2 + 1].uTag = 0;
707 }
708
709 pTlb->cTlbGlobalLargePageCurLoads = 0;
710 pTlb->GlobalLargePageRange.uLastTag = 0;
711 pTlb->GlobalLargePageRange.uFirstTag = UINT64_MAX;
712 }
713}
714#endif
715
716
717/**
718 * Worker for IEMTlbInvalidateAll and IEMTlbInvalidateAllGlobal.
719 */
720template<bool a_fGlobal>
721DECL_FORCE_INLINE(void) iemTlbInvalidateAll(PVMCPUCC pVCpu)
722{
723#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
724 Log10(("IEMTlbInvalidateAll\n"));
725
726# ifdef IEM_WITH_CODE_TLB
727 pVCpu->iem.s.cbInstrBufTotal = 0;
728 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.CodeTlb);
729# endif
730
731# ifdef IEM_WITH_DATA_TLB
732 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.DataTlb);
733# endif
734#else
735 RT_NOREF(pVCpu);
736#endif
737}
738
739
740/**
741 * Invalidates the non-global IEM TLB entries.
742 *
743 * This is called internally as well as by PGM when moving GC mappings.
744 *
745 * @param pVCpu The cross context virtual CPU structure of the calling
746 * thread.
747 */
748VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
749{
750 iemTlbInvalidateAll<false>(pVCpu);
751}
752
753
754/**
755 * Invalidates all the IEM TLB entries.
756 *
757 * This is called internally as well as by PGM when moving GC mappings.
758 *
759 * @param pVCpu The cross context virtual CPU structure of the calling
760 * thread.
761 */
762VMM_INT_DECL(void) IEMTlbInvalidateAllGlobal(PVMCPUCC pVCpu)
763{
764 iemTlbInvalidateAll<true>(pVCpu);
765}
766
767
768#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
769
770template<bool const a_fDataTlb, bool const a_f2MbLargePage, bool const a_fGlobal, bool const a_fNonGlobal>
771DECLINLINE(void) iemTlbInvalidateLargePageWorkerInner(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, RTGCPTR GCPtrInstrBufPcTag)
772{
773 /* Combine TAG values with the TLB revisions. */
774 RTGCPTR GCPtrTagGlob = a_fGlobal ? GCPtrTag | pTlb->uTlbRevisionGlobal : 0;
775 if (a_fNonGlobal)
776 GCPtrTag |= pTlb->uTlbRevision;
777
778 /* Set up the scan. */
779 bool const fPartialScan = IEMTLB_ENTRY_COUNT >= (a_f2MbLargePage ? 512 : 1024);
780 uintptr_t idxEven = fPartialScan ? IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag) : 0;
781 uintptr_t const idxEvenEnd = fPartialScan ? idxEven + (a_f2MbLargePage ? 512 : 1024) : IEMTLB_ENTRY_COUNT;
782 RTGCPTR const GCPtrTagMask = fPartialScan
783 ? ~(RTGCPTR)0
784 : ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK
785 & ~(RTGCPTR)( ( RT_BIT_64((a_f2MbLargePage ? 9 : 10) - IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO)
786 - 1U)
787 << IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO);
788
789 /*
790 * Do the scanning.
791 */
792 for (idxEven = 0; idxEven < idxEvenEnd; idxEven += 2)
793 {
794 if (a_fNonGlobal)
795 {
796 if ((pTlb->aEntries[idxEven].uTag & GCPtrTagMask) == GCPtrTag)
797 {
798 if (pTlb->aEntries[idxEven].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)
799 {
800 pTlb->aEntries[idxEven].uTag = 0;
801 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag)
802 pVCpu->iem.s.cbInstrBufTotal = 0;
803 }
804 }
805 GCPtrTag++;
806 }
807
808 if (a_fGlobal)
809 {
810 if ((pTlb->aEntries[idxEven + 1].uTag & GCPtrTagMask) == GCPtrTagGlob)
811 {
812 if (pTlb->aEntries[idxEven + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)
813 {
814 pTlb->aEntries[idxEven + 1].uTag = 0;
815 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag)
816 pVCpu->iem.s.cbInstrBufTotal = 0;
817 }
818 }
819 GCPtrTagGlob++;
820 }
821 }
822
823}
824
825template<bool const a_fDataTlb, bool const a_f2MbLargePage>
826DECLINLINE(void) iemTlbInvalidateLargePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, RTGCPTR GCPtrInstrBufPcTag)
827{
828 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
829
830 GCPtrTag &= ~(RTGCPTR)(RT_BIT_64((a_f2MbLargePage ? 21 : 22) - GUEST_PAGE_SHIFT) - 1U);
831 if ( pTlb->GlobalLargePageRange.uFirstTag >= GCPtrTag
832 && pTlb->GlobalLargePageRange.uLastTag <= GCPtrTag)
833 {
834 if ( pTlb->NonGlobalLargePageRange.uFirstTag < GCPtrTag
835 || pTlb->NonGlobalLargePageRange.uLastTag > GCPtrTag)
836 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
837 else
838 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
839 }
840 else if ( pTlb->NonGlobalLargePageRange.uFirstTag < GCPtrTag
841 || pTlb->NonGlobalLargePageRange.uLastTag > GCPtrTag)
842 { /* Large pages aren't as likely in the non-global TLB half. */ }
843 else
844 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, false, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
845}
846
847template<bool const a_fDataTlb>
848DECLINLINE(void) iemTlbInvalidatePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, uintptr_t idxEven)
849{
850 /*
851 * Flush the entry pair.
852 *
853 * We ASSUME that the guest hasn't tricked us into loading one of these
854 * from a large page and the other from a regular 4KB page. This is made
855 * much less of a problem, in that the guest would also have to flip the
856 * G bit to accomplish this.
857 */
858 int fMaybeLargePage = -1;
859 if (pTlb->aEntries[idxEven].uTag == (GCPtrTag | pTlb->uTlbRevision))
860 {
861 pTlb->aEntries[idxEven].uTag = 0;
862 fMaybeLargePage = RT_BOOL(pTlb->aEntries[idxEven].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE);
863 if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
864 pVCpu->iem.s.cbInstrBufTotal = 0;
865 }
866 if (pTlb->aEntries[idxEven + 1].uTag == (GCPtrTag | pTlb->uTlbRevisionGlobal))
867 {
868 pTlb->aEntries[idxEven + 1].uTag = 0;
869 fMaybeLargePage = RT_BOOL(pTlb->aEntries[idxEven + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE);
870 if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
871 pVCpu->iem.s.cbInstrBufTotal = 0;
872 }
873
874 /*
875 * If we cannot rule out a large page, we have to scan all the 4K TLB
876 * entries such a page covers to ensure we evict all relevant entries.
877 * ASSUMES that tag calculation is a right shift by GUEST_PAGE_SHIFT.
878 */
879 if ( fMaybeLargePage
880# if 0 /** @todo do accurate counts or currently loaded large stuff and we can use those */
881 && (pTlb->cTlbGlobalLargePageCurLoads || pTlb->cTlbNonGlobalLargePageCurLoads))
882# else
883 && (pTlb->GlobalLargePageRange.uLastTag || pTlb->NonGlobalLargePageRange.uLastTag))
884# endif
885 {
886 RTGCPTR const GCPtrInstrBufPcTag = a_fDataTlb ? 0 : IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc);
887 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
888 iemTlbInvalidateLargePageWorker<a_fDataTlb, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
889 else
890 iemTlbInvalidateLargePageWorker<a_fDataTlb, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
891 }
892}
893
894#endif /* defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB) */
895
896/**
897 * Invalidates a page in the TLBs.
898 *
899 * @param pVCpu The cross context virtual CPU structure of the calling
900 * thread.
901 * @param GCPtr The address of the page to invalidate
902 * @thread EMT(pVCpu)
903 */
904VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
905{
906#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
907 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
908 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
909 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
910 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtr);
911
912# ifdef IEM_WITH_CODE_TLB
913 iemTlbInvalidatePageWorker<false>(pVCpu, &pVCpu->iem.s.CodeTlb, GCPtr, idxEven);
914# endif
915# ifdef IEM_WITH_DATA_TLB
916 iemTlbInvalidatePageWorker<true>(pVCpu, &pVCpu->iem.s.DataTlb, GCPtr, idxEven);
917# endif
918#else
919 NOREF(pVCpu); NOREF(GCPtr);
920#endif
921}
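/* Editor's note: worked example for the invalidation above, assuming that
 * IEMTLB_CALC_TAG_NO_REV is a plain right shift by GUEST_PAGE_SHIFT (see the
 * ASSUMES note in iemTlbInvalidatePageWorker).  Invalidating
 * GCPtr = 0x00007fff12345678 yields the tag 0x7fff12345; the derived even
 * index selects an even/odd entry pair, where the even slot is matched
 * against uTlbRevision and the odd slot against uTlbRevisionGlobal before
 * any large-page fallback scan is considered. */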
922
923
924#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
925/**
926 * Invalidates both TLBs in slow fashion following a rollover.
927 *
928 * Worker for IEMTlbInvalidateAllPhysical,
929 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
930 * iemMemMapJmp and others.
931 *
932 * @thread EMT(pVCpu)
933 */
934static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
935{
936 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
937 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
938 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
939
940 unsigned i;
941# ifdef IEM_WITH_CODE_TLB
942 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
943 while (i-- > 0)
944 {
945 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
946 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
947 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
948 }
949 pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers++;
950 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
951# endif
952# ifdef IEM_WITH_DATA_TLB
953 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
954 while (i-- > 0)
955 {
956 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
957 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
958 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
959 }
960 pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers++;
961 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
962# endif
963
964}
965#endif
966
967
968/**
969 * Invalidates the host physical aspects of the IEM TLBs.
970 *
971 * This is called internally as well as by PGM when moving GC mappings.
972 *
973 * @param pVCpu The cross context virtual CPU structure of the calling
974 * thread.
975 * @note Currently not used.
976 */
977VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
978{
979#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
980 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
981 Log10(("IEMTlbInvalidateAllPhysical\n"));
982
983# ifdef IEM_WITH_CODE_TLB
984 pVCpu->iem.s.cbInstrBufTotal = 0;
985# endif
986 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
987 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
988 {
989 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
990 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
991 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
992 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
993 }
994 else
995 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
996#else
997 NOREF(pVCpu);
998#endif
999}
1000
1001
1002/**
1003 * Invalidates the host physical aspects of the IEM TLBs.
1004 *
1005 * This is called internally as well as by PGM when moving GC mappings.
1006 *
1007 * @param pVM The cross context VM structure.
1008 * @param idCpuCaller The ID of the calling EMT if available to the caller,
1009 * otherwise NIL_VMCPUID.
1010 * @param enmReason The reason we're called.
1011 *
1012 * @remarks Caller holds the PGM lock.
1013 */
1014VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
1015{
1016#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1017 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
1018 if (pVCpuCaller)
1019 VMCPU_ASSERT_EMT(pVCpuCaller);
1020 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
1021
1022 VMCC_FOR_EACH_VMCPU(pVM)
1023 {
1024# ifdef IEM_WITH_CODE_TLB
1025 if (pVCpuCaller == pVCpu)
1026 pVCpu->iem.s.cbInstrBufTotal = 0;
1027# endif
1028
1029 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
1030 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
1031 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
1032 { /* likely */}
1033 else if (pVCpuCaller != pVCpu)
1034 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
1035 else
1036 {
1037 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1038 continue;
1039 }
1040 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
1041 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
1042
1043 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
1044 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
1045 }
1046 VMCC_FOR_EACH_VMCPU_END(pVM);
1047
1048#else
1049 RT_NOREF(pVM, idCpuCaller, enmReason);
1050#endif
1051}
1052
1053
1054/**
1055 * Flushes the prefetch buffer, light version.
1056 */
1057void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
1058{
1059#ifndef IEM_WITH_CODE_TLB
1060 pVCpu->iem.s.cbOpcode = cbInstr;
1061#else
1062 RT_NOREF(pVCpu, cbInstr);
1063#endif
1064}
1065
1066
1067/**
1068 * Flushes the prefetch buffer, heavy version.
1069 */
1070void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
1071{
1072#ifndef IEM_WITH_CODE_TLB
1073 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
1074#elif 1
1075 pVCpu->iem.s.cbInstrBufTotal = 0;
1076 RT_NOREF(cbInstr);
1077#else
1078 RT_NOREF(pVCpu, cbInstr);
1079#endif
1080}
1081
1082
1083
1084#ifdef IEM_WITH_CODE_TLB
1085
1086/**
1087 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
1088 * failure and jumps.
1089 *
1090 * We end up here for a number of reasons:
1091 * - pbInstrBuf isn't yet initialized.
1092 * - Advancing beyond the buffer boundary (e.g. cross page).
1093 * - Advancing beyond the CS segment limit.
1094 * - Fetching from non-mappable page (e.g. MMIO).
1095 * - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).
1096 *
1097 * @param pVCpu The cross context virtual CPU structure of the
1098 * calling thread.
1099 * @param pvDst Where to return the bytes.
1100 * @param cbDst Number of bytes to read. A value of zero is
1101 * allowed for initializing pbInstrBuf (the
1102 * recompiler does this). In this case it is best
1103 * to set pbInstrBuf to NULL prior to the call.
1104 */
1105void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
1106{
1107# ifdef IN_RING3
1108 for (;;)
1109 {
1110 Assert(cbDst <= 8);
1111 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1112
1113 /*
1114 * We might have a partial buffer match, deal with that first to make the
1115 * rest simpler. This is the first part of the cross page/buffer case.
1116 */
1117 uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
1118 if (pbInstrBuf != NULL)
1119 {
1120 Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */
1121 uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
1122 if (offBuf < cbInstrBuf)
1123 {
1124 Assert(offBuf + cbDst > cbInstrBuf);
1125 uint32_t const cbCopy = cbInstrBuf - offBuf;
1126 memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);
1127
1128 cbDst -= cbCopy;
1129 pvDst = (uint8_t *)pvDst + cbCopy;
1130 offBuf += cbCopy;
1131 }
1132 }
1133
1134 /*
1135 * Check segment limit, figuring how much we're allowed to access at this point.
1136 *
1137 * We will fault immediately if RIP is past the segment limit / in non-canonical
1138 * territory. If we do continue, there are one or more bytes to read before we
1139 * end up in trouble and we need to do that first before faulting.
1140 */
1141 RTGCPTR GCPtrFirst;
1142 uint32_t cbMaxRead;
1143 if (IEM_IS_64BIT_CODE(pVCpu))
1144 {
1145 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1146 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1147 { /* likely */ }
1148 else
1149 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1150 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1151 }
1152 else
1153 {
1154 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1155 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1156 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1157 { /* likely */ }
1158 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
1159 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1160 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1161 if (cbMaxRead != 0)
1162 { /* likely */ }
1163 else
1164 {
1165 /* Overflowed because address is 0 and limit is max. */
1166 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1167 cbMaxRead = X86_PAGE_SIZE;
1168 }
1169 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1170 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1171 if (cbMaxRead2 < cbMaxRead)
1172 cbMaxRead = cbMaxRead2;
1173 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1174 }
1175
1176 /*
1177 * Get the TLB entry for this piece of code.
1178 */
1179 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrFirst);
1180 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.CodeTlb, uTagNoRev);
1181 if ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision)
1182 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal))
1183 {
1184 /* likely when executing lots of code, otherwise unlikely */
1185# ifdef IEM_WITH_TLB_STATISTICS
1186 pVCpu->iem.s.CodeTlb.cTlbCoreHits++;
1187# endif
1188 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1189
1190 /* Check TLB page table level access flags. */
1191 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1192 {
1193 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
1194 {
1195 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1196 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1197 }
1198 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1199 {
1200 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1201 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1202 }
1203 }
1204
1205 /* Look up the physical page info if necessary. */
1206 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1207 { /* not necessary */ }
1208 else
1209 {
1210 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1211 { /* likely */ }
1212 else
1213 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1214 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
1215 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1216 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1217 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1218 }
1219 }
1220 else
1221 {
1222 pVCpu->iem.s.CodeTlb.cTlbCoreMisses++;
1223
1224 /* This page table walking will set A bits as required by the access while performing the walk.
1225 ASSUMES these are set when the address is translated rather than on commit... */
1226 /** @todo testcase: check when A bits are actually set by the CPU for code. */
1227 PGMPTWALKFAST WalkFast;
1228 int rc = PGMGstQueryPageFast(pVCpu, GCPtrFirst,
1229 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1230 &WalkFast);
1231 if (RT_SUCCESS(rc))
1232 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1233 else
1234 {
1235#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1236 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? OF COURSE! */
1237 Assert(!(WalkFast.fFailed & PGM_WALKFAIL_EPT));
1238#endif
1239 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1240 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
1241 }
1242
1243 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1244 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
1245 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
1246 {
1247 pTlbe--;
1248 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision;
1249 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
1250 iemTlbLoadedLargePage<false>(&pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
1251 }
1252 else
1253 {
1254 pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads++;
1255 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal;
1256 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
1257 iemTlbLoadedLargePage<true>(&pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
1258 }
1259 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
1260 | (WalkFast.fEffective >> X86_PTE_PAE_BIT_NX) /*IEMTLBE_F_PT_NO_EXEC*/
1261 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
1262 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
1263 pTlbe->GCPhys = GCPhysPg;
1264 pTlbe->pbMappingR3 = NULL;
1265 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1266 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) || IEM_GET_CPL(pVCpu) != 3);
1267 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1268
1269 /* Resolve the physical address. */
1270 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1271 { /* likely */ }
1272 else
1273 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1274 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
1275 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1276 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1277 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1278 }
1279
1280# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1281 /*
1282 * Try do a direct read using the pbMappingR3 pointer.
1283 * Note! Do not recheck the physical TLB revision number here as we have the
1284 * wrong response to changes in the else case. If someone is updating
1285 * pVCpu->iem.s.CodeTlb.uTlbPhysRev in parallel to us, we should be fine
1286 * pretending we always won the race.
1287 */
1288 if ( (pTlbe->fFlagsAndPhysRev & (/*IEMTLBE_F_PHYS_REV |*/ IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1289 == /*pVCpu->iem.s.CodeTlb.uTlbPhysRev*/ 0U)
1290 {
1291 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1292 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1293 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1294 {
1295 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1296 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1297 }
1298 else
1299 {
1300 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1301 if (cbInstr + (uint32_t)cbDst <= 15)
1302 {
1303 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1304 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1305 }
1306 else
1307 {
1308 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1309 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1310 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1311 }
1312 }
1313 if (cbDst <= cbMaxRead)
1314 {
1315 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1316 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1317
1318 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1319 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1320 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1321 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1322 if (cbDst > 0) /* To make ASAN happy in the TLB load case. */
1323 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1324 else
1325 Assert(!pvDst);
1326 return;
1327 }
1328 pVCpu->iem.s.pbInstrBuf = NULL;
1329
1330 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1331 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1332 }
1333# else
1334# error "refactor as needed"
1335 /*
1336 * If there is no special read handling, we can read a bit more and
1337 * put it in the prefetch buffer.
1338 */
1339 if ( cbDst < cbMaxRead
1340 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1341 {
1342 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1343 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1344 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1345 { /* likely */ }
1346 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1347 {
1348 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1349 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1350 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1351 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICRC_VAL(rcStrict)));
1352 }
1353 else
1354 {
1355 Log((RT_SUCCESS(rcStrict)
1356 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1357 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1358 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1359 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1360 }
1361 }
1362# endif
1363 /*
1364 * Special read handling, so only read exactly what's needed.
1365 * This is a highly unlikely scenario.
1366 */
1367 else
1368 {
1369 pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath++;
1370
1371 /* Check instruction length. */
1372 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1373 if (RT_LIKELY(cbInstr + cbDst <= 15))
1374 { /* likely */ }
1375 else
1376 {
1377 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1378 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1379 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1380 }
1381
1382 /* Do the reading. */
1383 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1384 if (cbToRead > 0)
1385 {
1386 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1387 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1388 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1389 { /* likely */ }
1390 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1391 {
1392 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1393 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1394 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1395 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1396 }
1397 else
1398 {
1399 Log((RT_SUCCESS(rcStrict)
1400 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1401 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1402 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1403 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1404 }
1405 }
1406
1407 /* Update the state and probably return. */
1408 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1409 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1410 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1411
1412 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1413 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1414 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1415 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1416 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1417 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1418 pVCpu->iem.s.pbInstrBuf = NULL;
1419 if (cbToRead == cbDst)
1420 return;
1421 Assert(cbToRead == cbMaxRead);
1422 }
1423
1424 /*
1425 * More to read, loop.
1426 */
1427 cbDst -= cbMaxRead;
1428 pvDst = (uint8_t *)pvDst + cbMaxRead;
1429 }
1430# else /* !IN_RING3 */
1431 RT_NOREF(pvDst, cbDst);
1432 if (pvDst || cbDst)
1433 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1434# endif /* !IN_RING3 */
1435}
1436
1437#else /* !IEM_WITH_CODE_TLB */
1438
1439/**
1440 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1441 * exception if it fails.
1442 *
1443 * @returns Strict VBox status code.
1444 * @param pVCpu The cross context virtual CPU structure of the
1445 * calling thread.
1446 * @param cbMin The minimum number of bytes relative to offOpcode
1447 * that must be read.
1448 */
1449VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1450{
1451 /*
1452 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1453 *
1454 * First translate CS:rIP to a physical address.
1455 */
1456 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1457 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1458 uint8_t const cbLeft = cbOpcode - offOpcode;
1459 Assert(cbLeft < cbMin);
1460 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1461
1462 uint32_t cbToTryRead;
1463 RTGCPTR GCPtrNext;
1464 if (IEM_IS_64BIT_CODE(pVCpu))
1465 {
1466 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1467 if (!IEM_IS_CANONICAL(GCPtrNext))
1468 return iemRaiseGeneralProtectionFault0(pVCpu);
1469 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1470 }
1471 else
1472 {
1473 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1474 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1475 GCPtrNext32 += cbOpcode;
1476 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1477 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1478 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1479 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1480 if (!cbToTryRead) /* overflowed */
1481 {
1482 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1483 cbToTryRead = UINT32_MAX;
1484 /** @todo check out wrapping around the code segment. */
1485 }
1486 if (cbToTryRead < cbMin - cbLeft)
1487 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1488 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1489
1490 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1491 if (cbToTryRead > cbLeftOnPage)
1492 cbToTryRead = cbLeftOnPage;
1493 }
1494
1495 /* Restrict to opcode buffer space.
1496
1497 We're making ASSUMPTIONS here based on work done previously in
1498 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1499 be fetched in case of an instruction crossing two pages. */
1500 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1501 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1502 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1503 { /* likely */ }
1504 else
1505 {
1506 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1507 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1508 return iemRaiseGeneralProtectionFault0(pVCpu);
1509 }
1510
1511 PGMPTWALKFAST WalkFast;
1512 int rc = PGMGstQueryPageFast(pVCpu, GCPtrNext,
1513 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1514 &WalkFast);
1515 if (RT_SUCCESS(rc))
1516 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1517 else
1518 {
1519 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1520#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1521 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
1522 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1523#endif
1524 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1525 }
1526 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
1527 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1528
1529 RTGCPHYS const GCPhys = WalkFast.GCPhys;
1530 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1531
1532 /*
1533 * Read the bytes at this address.
1534 *
1535 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1536 * and since PATM should only patch the start of an instruction there
1537 * should be no need to check again here.
1538 */
1539 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1540 {
1541 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1542 cbToTryRead, PGMACCESSORIGIN_IEM);
1543 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1544 { /* likely */ }
1545 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1546 {
1547 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1548 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1549 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1550 }
1551 else
1552 {
1553 Log((RT_SUCCESS(rcStrict)
1554 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1555 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1556 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1557 return rcStrict;
1558 }
1559 }
1560 else
1561 {
1562 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1563 if (RT_SUCCESS(rc))
1564 { /* likely */ }
1565 else
1566 {
1567 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1568 return rc;
1569 }
1570 }
1571 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1572 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1573
1574 return VINF_SUCCESS;
1575}
1576
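/*
 * Illustrative sketch (not part of the build): the clamping arithmetic used by
 * iemOpcodeFetchMoreBytes above - a prefetch may neither cross the current
 * guest page nor overflow the opcode buffer.  Plain C99 types only; the 4 KiB
 * page size and the parameters below are simplified stand-ins for
 * GUEST_PAGE_SIZE and the IEM decoder state, so treat this as an assumption.
 */
#if 0
# include <stddef.h>
# include <stdint.h>

static uint32_t sketchClampOpcodeRead(uint64_t GCPtrNext, uint32_t cbWanted,
                                      uint8_t cbOpcodeAlready, size_t cbOpcodeBuf)
{
    /* Never read past the end of the current 4 KiB guest page. */
    uint32_t const cbLeftOnPage = 4096U - (uint32_t)(GCPtrNext & 0xfffU);
    uint32_t       cbToTryRead  = cbWanted < cbLeftOnPage ? cbWanted : cbLeftOnPage;

    /* Never read more than fits in the remaining opcode buffer space. */
    uint32_t const cbBufLeft = (uint32_t)(cbOpcodeBuf - cbOpcodeAlready);
    if (cbToTryRead > cbBufLeft)
        cbToTryRead = cbBufLeft;
    return cbToTryRead;
}
#endif
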
1577#endif /* !IEM_WITH_CODE_TLB */
1578#ifndef IEM_WITH_SETJMP
1579
1580/**
1581 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1582 *
1583 * @returns Strict VBox status code.
1584 * @param pVCpu The cross context virtual CPU structure of the
1585 * calling thread.
1586 * @param pb Where to return the opcode byte.
1587 */
1588VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1589{
1590 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1591 if (rcStrict == VINF_SUCCESS)
1592 {
1593 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1594 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1595 pVCpu->iem.s.offOpcode = offOpcode + 1;
1596 }
1597 else
1598 *pb = 0;
1599 return rcStrict;
1600}
1601
1602#else /* IEM_WITH_SETJMP */
1603
1604/**
1605 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1606 *
1607 * @returns The opcode byte.
1608 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1609 */
1610uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1611{
1612# ifdef IEM_WITH_CODE_TLB
1613 uint8_t u8;
1614 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1615 return u8;
1616# else
1617 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1618 if (rcStrict == VINF_SUCCESS)
1619 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1620 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1621# endif
1622}
1623
1624#endif /* IEM_WITH_SETJMP */
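
/*
 * Illustrative sketch (not part of the build): the shape shared by the slow
 * path opcode getters above - top the buffer up first, then read at the
 * current offset and advance it.  The SKETCHDECODER struct and its guest
 * memory window are simplified stand-ins rather than IEM types, and the error
 * return merely stands in for the #GP(0)/#PF raising done by the real code.
 */
#if 0
# include <stdint.h>
# include <string.h>

typedef struct SKETCHDECODER
{
    const uint8_t *pbGuest;         /* stand-in for guest code memory */
    uint32_t       cbGuest;
    uint32_t       offGuest;        /* next guest byte to pull in */
    uint8_t        abOpcode[16];
    uint8_t        offOpcode;       /* read cursor */
    uint8_t        cbOpcode;        /* valid bytes in abOpcode */
} SKETCHDECODER;

/* Stands in for iemOpcodeFetchMoreBytes: make at least cbMin bytes readable. */
static int sketchFetchMoreBytes(SKETCHDECODER *pDec, uint8_t cbMin)
{
    uint8_t const cbHave = (uint8_t)(pDec->cbOpcode - pDec->offOpcode);
    if (cbHave >= cbMin)
        return 0;
    uint32_t const cbRoom  = (uint32_t)sizeof(pDec->abOpcode) - pDec->cbOpcode;
    uint32_t const cbAvail = pDec->cbGuest - pDec->offGuest;
    uint32_t const cbCopy  = cbRoom < cbAvail ? cbRoom : cbAvail;
    if (cbHave + cbCopy < cbMin)
        return -1;                  /* the real code raises #GP(0) or #PF here */
    memcpy(&pDec->abOpcode[pDec->cbOpcode], &pDec->pbGuest[pDec->offGuest], cbCopy);
    pDec->offGuest += cbCopy;
    pDec->cbOpcode  = (uint8_t)(pDec->cbOpcode + cbCopy);
    return 0;
}

/* Mirrors the read-and-advance step of iemOpcodeGetNextU8Slow. */
static int sketchGetNextU8(SKETCHDECODER *pDec, uint8_t *pb)
{
    int rc = sketchFetchMoreBytes(pDec, 1);
    if (rc == 0)
        *pb = pDec->abOpcode[pDec->offOpcode++];
    else
        *pb = 0;
    return rc;
}
#endif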
1625
1626#ifndef IEM_WITH_SETJMP
1627
1628/**
1629 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1630 *
1631 * @returns Strict VBox status code.
1632 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1633 * @param pu16 Where to return the opcode word (sign-extended byte).
1634 */
1635VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1636{
1637 uint8_t u8;
1638 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1639 if (rcStrict == VINF_SUCCESS)
1640 *pu16 = (int8_t)u8;
1641 return rcStrict;
1642}
1643
1644
1645/**
1646 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1647 *
1648 * @returns Strict VBox status code.
1649 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1650 * @param pu32 Where to return the opcode dword.
1651 */
1652VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1653{
1654 uint8_t u8;
1655 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1656 if (rcStrict == VINF_SUCCESS)
1657 *pu32 = (int8_t)u8;
1658 return rcStrict;
1659}
1660
1661
1662/**
1663 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1664 *
1665 * @returns Strict VBox status code.
1666 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1667 * @param pu64 Where to return the opcode qword.
1668 */
1669VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1670{
1671 uint8_t u8;
1672 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1673 if (rcStrict == VINF_SUCCESS)
1674 *pu64 = (int8_t)u8;
1675 return rcStrict;
1676}
1677
1678#endif /* !IEM_WITH_SETJMP */
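
/*
 * Illustrative sketch (not part of the build): the sign-extension used by the
 * S8Sx helpers above.  Casting the raw opcode byte to int8_t first makes the
 * widening conversion replicate bit 7 into all upper bits; the helper names
 * are local inventions.
 */
#if 0
# include <stdint.h>

static uint32_t sketchSignExtendU8ToU32(uint8_t bImm)
{
    return (uint32_t)(int32_t)(int8_t)bImm;     /* 0x80 -> 0xffffff80 */
}

static uint64_t sketchSignExtendU8ToU64(uint8_t bImm)
{
    return (uint64_t)(int64_t)(int8_t)bImm;     /* 0x80 -> 0xffffffffffffff80 */
}
#endif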
1679
1680
1681#ifndef IEM_WITH_SETJMP
1682
1683/**
1684 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1685 *
1686 * @returns Strict VBox status code.
1687 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1688 * @param pu16 Where to return the opcode word.
1689 */
1690VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1691{
1692 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1693 if (rcStrict == VINF_SUCCESS)
1694 {
1695 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1696# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1697 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1698# else
1699 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1700# endif
1701 pVCpu->iem.s.offOpcode = offOpcode + 2;
1702 }
1703 else
1704 *pu16 = 0;
1705 return rcStrict;
1706}
1707
1708#else /* IEM_WITH_SETJMP */
1709
1710/**
1711 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1712 *
1713 * @returns The opcode word.
1714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1715 */
1716uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1717{
1718# ifdef IEM_WITH_CODE_TLB
1719 uint16_t u16;
1720 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1721 return u16;
1722# else
1723 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1724 if (rcStrict == VINF_SUCCESS)
1725 {
1726 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1727 pVCpu->iem.s.offOpcode += 2;
1728# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1729 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1730# else
1731 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1732# endif
1733 }
1734 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1735# endif
1736}
1737
1738#endif /* IEM_WITH_SETJMP */
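
/*
 * Illustrative sketch (not part of the build): a portable equivalent of the
 * RT_MAKE_U16 / RT_MAKE_U32_FROM_U8 composition used above when unaligned
 * access is not available.  x86 immediates are little endian, so the first
 * opcode byte is the least significant one; the function names are local
 * inventions.
 */
#if 0
# include <stdint.h>

static uint16_t sketchReadU16LE(const uint8_t *pb)
{
    return (uint16_t)(pb[0] | ((uint16_t)pb[1] << 8));
}

static uint32_t sketchReadU32LE(const uint8_t *pb)
{
    return (uint32_t)pb[0]
         | ((uint32_t)pb[1] <<  8)
         | ((uint32_t)pb[2] << 16)
         | ((uint32_t)pb[3] << 24);
}
#endif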
1739
1740#ifndef IEM_WITH_SETJMP
1741
1742/**
1743 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1744 *
1745 * @returns Strict VBox status code.
1746 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1747 * @param pu32 Where to return the opcode double word.
1748 */
1749VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1750{
1751 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1752 if (rcStrict == VINF_SUCCESS)
1753 {
1754 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1755 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1756 pVCpu->iem.s.offOpcode = offOpcode + 2;
1757 }
1758 else
1759 *pu32 = 0;
1760 return rcStrict;
1761}
1762
1763
1764/**
1765 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1766 *
1767 * @returns Strict VBox status code.
1768 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1769 * @param pu64 Where to return the opcode quad word.
1770 */
1771VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1772{
1773 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1774 if (rcStrict == VINF_SUCCESS)
1775 {
1776 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1777 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1778 pVCpu->iem.s.offOpcode = offOpcode + 2;
1779 }
1780 else
1781 *pu64 = 0;
1782 return rcStrict;
1783}
1784
1785#endif /* !IEM_WITH_SETJMP */
1786
1787#ifndef IEM_WITH_SETJMP
1788
1789/**
1790 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1791 *
1792 * @returns Strict VBox status code.
1793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1794 * @param pu32 Where to return the opcode dword.
1795 */
1796VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1797{
1798 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1799 if (rcStrict == VINF_SUCCESS)
1800 {
1801 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1802# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1803 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1804# else
1805 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1806 pVCpu->iem.s.abOpcode[offOpcode + 1],
1807 pVCpu->iem.s.abOpcode[offOpcode + 2],
1808 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1809# endif
1810 pVCpu->iem.s.offOpcode = offOpcode + 4;
1811 }
1812 else
1813 *pu32 = 0;
1814 return rcStrict;
1815}
1816
1817#else /* IEM_WITH_SETJMP */
1818
1819/**
1820 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1821 *
1822 * @returns The opcode dword.
1823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1824 */
1825uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1826{
1827# ifdef IEM_WITH_CODE_TLB
1828 uint32_t u32;
1829 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1830 return u32;
1831# else
1832 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1833 if (rcStrict == VINF_SUCCESS)
1834 {
1835 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1836 pVCpu->iem.s.offOpcode = offOpcode + 4;
1837# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1838 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1839# else
1840 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1841 pVCpu->iem.s.abOpcode[offOpcode + 1],
1842 pVCpu->iem.s.abOpcode[offOpcode + 2],
1843 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1844# endif
1845 }
1846 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1847# endif
1848}
1849
1850#endif /* IEM_WITH_SETJMP */
1851
1852#ifndef IEM_WITH_SETJMP
1853
1854/**
1855 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1856 *
1857 * @returns Strict VBox status code.
1858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1859 * @param pu64 Where to return the opcode dword, zero-extended to a qword.
1860 */
1861VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1862{
1863 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1864 if (rcStrict == VINF_SUCCESS)
1865 {
1866 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1867 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1868 pVCpu->iem.s.abOpcode[offOpcode + 1],
1869 pVCpu->iem.s.abOpcode[offOpcode + 2],
1870 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1871 pVCpu->iem.s.offOpcode = offOpcode + 4;
1872 }
1873 else
1874 *pu64 = 0;
1875 return rcStrict;
1876}
1877
1878
1879/**
1880 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1881 *
1882 * @returns Strict VBox status code.
1883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1884 * @param pu64 Where to return the opcode qword.
1885 */
1886VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1887{
1888 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1889 if (rcStrict == VINF_SUCCESS)
1890 {
1891 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1892 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1893 pVCpu->iem.s.abOpcode[offOpcode + 1],
1894 pVCpu->iem.s.abOpcode[offOpcode + 2],
1895 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1896 pVCpu->iem.s.offOpcode = offOpcode + 4;
1897 }
1898 else
1899 *pu64 = 0;
1900 return rcStrict;
1901}
1902
1903#endif /* !IEM_WITH_SETJMP */
1904
1905#ifndef IEM_WITH_SETJMP
1906
1907/**
1908 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1909 *
1910 * @returns Strict VBox status code.
1911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1912 * @param pu64 Where to return the opcode qword.
1913 */
1914VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1915{
1916 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1917 if (rcStrict == VINF_SUCCESS)
1918 {
1919 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1920# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1921 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1922# else
1923 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1924 pVCpu->iem.s.abOpcode[offOpcode + 1],
1925 pVCpu->iem.s.abOpcode[offOpcode + 2],
1926 pVCpu->iem.s.abOpcode[offOpcode + 3],
1927 pVCpu->iem.s.abOpcode[offOpcode + 4],
1928 pVCpu->iem.s.abOpcode[offOpcode + 5],
1929 pVCpu->iem.s.abOpcode[offOpcode + 6],
1930 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1931# endif
1932 pVCpu->iem.s.offOpcode = offOpcode + 8;
1933 }
1934 else
1935 *pu64 = 0;
1936 return rcStrict;
1937}
1938
1939#else /* IEM_WITH_SETJMP */
1940
1941/**
1942 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1943 *
1944 * @returns The opcode qword.
1945 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1946 */
1947uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1948{
1949# ifdef IEM_WITH_CODE_TLB
1950 uint64_t u64;
1951 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1952 return u64;
1953# else
1954 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1955 if (rcStrict == VINF_SUCCESS)
1956 {
1957 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1958 pVCpu->iem.s.offOpcode = offOpcode + 8;
1959# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1960 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1961# else
1962 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1963 pVCpu->iem.s.abOpcode[offOpcode + 1],
1964 pVCpu->iem.s.abOpcode[offOpcode + 2],
1965 pVCpu->iem.s.abOpcode[offOpcode + 3],
1966 pVCpu->iem.s.abOpcode[offOpcode + 4],
1967 pVCpu->iem.s.abOpcode[offOpcode + 5],
1968 pVCpu->iem.s.abOpcode[offOpcode + 6],
1969 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1970# endif
1971 }
1972 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1973# endif
1974}
1975
1976#endif /* IEM_WITH_SETJMP */
1977
1978
1979
1980/** @name Misc Worker Functions.
1981 * @{
1982 */
1983
1984/**
1985 * Gets the exception class for the specified exception vector.
1986 *
1987 * @returns The class of the specified exception.
1988 * @param uVector The exception vector.
1989 */
1990static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1991{
1992 Assert(uVector <= X86_XCPT_LAST);
1993 switch (uVector)
1994 {
1995 case X86_XCPT_DE:
1996 case X86_XCPT_TS:
1997 case X86_XCPT_NP:
1998 case X86_XCPT_SS:
1999 case X86_XCPT_GP:
2000 case X86_XCPT_SX: /* AMD only */
2001 return IEMXCPTCLASS_CONTRIBUTORY;
2002
2003 case X86_XCPT_PF:
2004 case X86_XCPT_VE: /* Intel only */
2005 return IEMXCPTCLASS_PAGE_FAULT;
2006
2007 case X86_XCPT_DF:
2008 return IEMXCPTCLASS_DOUBLE_FAULT;
2009 }
2010 return IEMXCPTCLASS_BENIGN;
2011}
2012
2013
2014/**
2015 * Evaluates how to handle an exception caused during delivery of another event
2016 * (exception / interrupt).
2017 *
2018 * @returns How to handle the recursive exception.
2019 * @param pVCpu The cross context virtual CPU structure of the
2020 * calling thread.
2021 * @param fPrevFlags The flags of the previous event.
2022 * @param uPrevVector The vector of the previous event.
2023 * @param fCurFlags The flags of the current exception.
2024 * @param uCurVector The vector of the current exception.
2025 * @param pfXcptRaiseInfo Where to store additional information about the
2026 * exception condition. Optional.
2027 */
2028VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
2029 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
2030{
2031 /*
2032 * Only CPU exceptions can be raised while delivering other events; software interrupt
2033 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
2034 */
2035 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
2036 Assert(pVCpu); RT_NOREF(pVCpu);
2037 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
2038
2039 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
2040 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
2041 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2042 {
2043 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
2044 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
2045 {
2046 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
2047 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
2048 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
2049 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
2050 {
2051 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
2052 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
2053 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
2054 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
2055 uCurVector, pVCpu->cpum.GstCtx.cr2));
2056 }
2057 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
2058 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
2059 {
2060 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
2061 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
2062 }
2063 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
2064 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
2065 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
2066 {
2067 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
2068 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
2069 }
2070 }
2071 else
2072 {
2073 if (uPrevVector == X86_XCPT_NMI)
2074 {
2075 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
2076 if (uCurVector == X86_XCPT_PF)
2077 {
2078 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
2079 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
2080 }
2081 }
2082 else if ( uPrevVector == X86_XCPT_AC
2083 && uCurVector == X86_XCPT_AC)
2084 {
2085 enmRaise = IEMXCPTRAISE_CPU_HANG;
2086 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
2087 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
2088 }
2089 }
2090 }
2091 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
2092 {
2093 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
2094 if (uCurVector == X86_XCPT_PF)
2095 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
2096 }
2097 else
2098 {
2099 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
2100 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
2101 }
2102
2103 if (pfXcptRaiseInfo)
2104 *pfXcptRaiseInfo = fRaiseInfo;
2105 return enmRaise;
2106}
2107
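/*
 * Illustrative sketch (not part of the build): the benign/contributory/page
 * fault pairing implemented by IEMEvaluateRecursiveXcpt above, reduced to the
 * bare decision matrix (cf. the double fault conditions in the Intel SDM).
 * The enums are local stand-ins, not the IEM types.
 */
#if 0
typedef enum SKETCHXCPTCLASS
{
    SKETCH_XCPT_BENIGN,
    SKETCH_XCPT_CONTRIBUTORY,
    SKETCH_XCPT_PAGE_FAULT,
    SKETCH_XCPT_DOUBLE_FAULT
} SKETCHXCPTCLASS;

typedef enum SKETCHRAISE
{
    SKETCH_RAISE_CURRENT,
    SKETCH_RAISE_DOUBLE_FAULT,
    SKETCH_RAISE_TRIPLE_FAULT
} SKETCHRAISE;

static SKETCHRAISE sketchEvalRecursiveXcpt(SKETCHXCPTCLASS enmPrev, SKETCHXCPTCLASS enmCur)
{
    if (   enmPrev == SKETCH_XCPT_PAGE_FAULT
        && (enmCur == SKETCH_XCPT_PAGE_FAULT || enmCur == SKETCH_XCPT_CONTRIBUTORY))
        return SKETCH_RAISE_DOUBLE_FAULT;
    if (   enmPrev == SKETCH_XCPT_CONTRIBUTORY
        && enmCur  == SKETCH_XCPT_CONTRIBUTORY)
        return SKETCH_RAISE_DOUBLE_FAULT;
    if (   enmPrev == SKETCH_XCPT_DOUBLE_FAULT
        && (enmCur == SKETCH_XCPT_CONTRIBUTORY || enmCur == SKETCH_XCPT_PAGE_FAULT))
        return SKETCH_RAISE_TRIPLE_FAULT;
    return SKETCH_RAISE_CURRENT;    /* benign pairings deliver the current exception normally */
}
#endif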
2108
2109/**
2110 * Enters the CPU shutdown state initiated by a triple fault or other
2111 * unrecoverable conditions.
2112 *
2113 * @returns Strict VBox status code.
2114 * @param pVCpu The cross context virtual CPU structure of the
2115 * calling thread.
2116 */
2117static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
2118{
2119 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2120 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
2121
2122 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
2123 {
2124 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
2125 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
2126 }
2127
2128 RT_NOREF(pVCpu);
2129 return VINF_EM_TRIPLE_FAULT;
2130}
2131
2132
2133/**
2134 * Validates a new SS segment.
2135 *
2136 * @returns VBox strict status code.
2137 * @param pVCpu The cross context virtual CPU structure of the
2138 * calling thread.
2139 * @param NewSS The new SS selector.
2140 * @param uCpl The CPL to load the stack for.
2141 * @param pDesc Where to return the descriptor.
2142 */
2143static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
2144{
2145 /* Null selectors are not allowed (we're not called for dispatching
2146 interrupts with SS=0 in long mode). */
2147 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
2148 {
2149 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
2150 return iemRaiseTaskSwitchFault0(pVCpu);
2151 }
2152
2153 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
2154 if ((NewSS & X86_SEL_RPL) != uCpl)
2155 {
2156 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
2157 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2158 }
2159
2160 /*
2161 * Read the descriptor.
2162 */
2163 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
2164 if (rcStrict != VINF_SUCCESS)
2165 return rcStrict;
2166
2167 /*
2168 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
2169 */
2170 if (!pDesc->Legacy.Gen.u1DescType)
2171 {
2172 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2173 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2174 }
2175
2176 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2177 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2178 {
2179 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2180 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2181 }
2182 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2183 {
2184 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2185 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2186 }
2187
2188 /* Is it there? */
2189 /** @todo testcase: Is this checked before the canonical / limit check below? */
2190 if (!pDesc->Legacy.Gen.u1Present)
2191 {
2192 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2193 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
2194 }
2195
2196 return VINF_SUCCESS;
2197}
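
/*
 * Illustrative sketch (not part of the build): the SS checks performed above,
 * boiled down to the descriptor bits that matter.  The struct and masks are
 * simplified stand-ins for the X86DESC bitfields and selector macros (the
 * 0xfffc mask is assumed here to match X86_SEL_MASK_OFF_RPL).
 */
#if 0
# include <stdbool.h>
# include <stdint.h>

typedef struct SKETCHSSDESC
{
    bool    fCodeOrData;    /* S bit: 1 = code/data, 0 = system */
    uint8_t u4Type;         /* bit 3 = code, bit 1 = writable (data) */
    uint8_t uDpl;
    bool    fPresent;
} SKETCHSSDESC;

static bool sketchIsValidNewSS(uint16_t uSelSS, uint8_t uCpl, const SKETCHSSDESC *pDesc)
{
    if (!(uSelSS & 0xfffcU))        return false;   /* null selector          -> #TS(0) */
    if ((uSelSS & 0x3U) != uCpl)    return false;   /* RPL != CPL             -> #TS    */
    if (!pDesc->fCodeOrData)        return false;   /* system descriptor      -> #TS    */
    if (pDesc->u4Type & 0x8U)       return false;   /* code segment           -> #TS    */
    if (!(pDesc->u4Type & 0x2U))    return false;   /* read-only data segment -> #TS    */
    if (pDesc->uDpl != uCpl)        return false;   /* DPL != CPL             -> #TS    */
    if (!pDesc->fPresent)           return false;   /* not present            -> #NP    */
    return true;
}
#endif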
2198
2199/** @} */
2200
2201
2202/** @name Raising Exceptions.
2203 *
2204 * @{
2205 */
2206
2207
2208/**
2209 * Loads the specified stack far pointer from the TSS.
2210 *
2211 * @returns VBox strict status code.
2212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2213 * @param uCpl The CPL to load the stack for.
2214 * @param pSelSS Where to return the new stack segment.
2215 * @param puEsp Where to return the new stack pointer.
2216 */
2217static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
2218{
2219 VBOXSTRICTRC rcStrict;
2220 Assert(uCpl < 4);
2221
2222 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2223 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
2224 {
2225 /*
2226 * 16-bit TSS (X86TSS16).
2227 */
2228 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
2229 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2230 {
2231 uint32_t off = uCpl * 4 + 2;
2232 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2233 {
2234 /** @todo check actual access pattern here. */
2235 uint32_t u32Tmp = 0; /* gcc maybe... */
2236 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2237 if (rcStrict == VINF_SUCCESS)
2238 {
2239 *puEsp = RT_LOWORD(u32Tmp);
2240 *pSelSS = RT_HIWORD(u32Tmp);
2241 return VINF_SUCCESS;
2242 }
2243 }
2244 else
2245 {
2246 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2247 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2248 }
2249 break;
2250 }
2251
2252 /*
2253 * 32-bit TSS (X86TSS32).
2254 */
2255 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
2256 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2257 {
2258 uint32_t off = uCpl * 8 + 4;
2259 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2260 {
2261/** @todo check actual access pattern here. */
2262 uint64_t u64Tmp;
2263 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2264 if (rcStrict == VINF_SUCCESS)
2265 {
2266 *puEsp = u64Tmp & UINT32_MAX;
2267 *pSelSS = (RTSEL)(u64Tmp >> 32);
2268 return VINF_SUCCESS;
2269 }
2270 }
2271 else
2272 {
2273 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2274 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2275 }
2276 break;
2277 }
2278
2279 default:
2280 AssertFailed();
2281 rcStrict = VERR_IEM_IPE_4;
2282 break;
2283 }
2284
2285 *puEsp = 0; /* make gcc happy */
2286 *pSelSS = 0; /* make gcc happy */
2287 return rcStrict;
2288}
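
/*
 * Illustrative sketch (not part of the build): where the inner stack pointers
 * live inside a legacy TSS, matching the offset arithmetic used above.  The
 * 16-bit TSS keeps sp/ss word pairs from offset 2, the 32-bit TSS keeps
 * esp/ss dword pairs from offset 4; the function names are local inventions.
 */
#if 0
# include <stdint.h>

static uint32_t sketchTss16StackOff(uint8_t uCpl)   /* uCpl 0..2 */
{
    return (uint32_t)uCpl * 4 + 2;  /* {sp0,ss0} at 2, {sp1,ss1} at 6, {sp2,ss2} at 10 */
}

static uint32_t sketchTss32StackOff(uint8_t uCpl)   /* uCpl 0..2 */
{
    return (uint32_t)uCpl * 8 + 4;  /* {esp0,ss0} at 4, {esp1,ss1} at 12, {esp2,ss2} at 20 */
}
#endif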
2289
2290
2291/**
2292 * Loads the specified stack pointer from the 64-bit TSS.
2293 *
2294 * @returns VBox strict status code.
2295 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2296 * @param uCpl The CPL to load the stack for.
2297 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2298 * @param puRsp Where to return the new stack pointer.
2299 */
2300static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2301{
2302 Assert(uCpl < 4);
2303 Assert(uIst < 8);
2304 *puRsp = 0; /* make gcc happy */
2305
2306 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2307 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2308
2309 uint32_t off;
2310 if (uIst)
2311 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2312 else
2313 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2314 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2315 {
2316 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2317 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2318 }
2319
2320 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2321}
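
/*
 * Illustrative sketch (not part of the build): the 64-bit TSS offset selection
 * done above - RSPn for the target CPL when no IST is requested, ISTn
 * otherwise.  The 0x04 (rsp0) and 0x24 (ist1) offsets follow the long mode
 * TSS layout and stand in for the RT_UOFFSETOF expressions.
 */
#if 0
# include <stdint.h>

static uint32_t sketchTss64StackOff(uint8_t uCpl /* 0..2 */, uint8_t uIst /* 0 or 1..7 */)
{
    if (uIst)
        return 0x24 + (uint32_t)(uIst - 1) * 8;     /* IST1..IST7 */
    return 0x04 + (uint32_t)uCpl * 8;               /* RSP0..RSP2 */
}
#endif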
2322
2323
2324/**
2325 * Adjust the CPU state according to the exception being raised.
2326 *
2327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2328 * @param u8Vector The exception that has been raised.
2329 */
2330DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2331{
2332 switch (u8Vector)
2333 {
2334 case X86_XCPT_DB:
2335 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2336 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2337 break;
2338 /** @todo Read the AMD and Intel exception reference... */
2339 }
2340}
2341
2342
2343/**
2344 * Implements exceptions and interrupts for real mode.
2345 *
2346 * @returns VBox strict status code.
2347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2348 * @param cbInstr The number of bytes to offset rIP by in the return
2349 * address.
2350 * @param u8Vector The interrupt / exception vector number.
2351 * @param fFlags The flags.
2352 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2353 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2354 */
2355static VBOXSTRICTRC
2356iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2357 uint8_t cbInstr,
2358 uint8_t u8Vector,
2359 uint32_t fFlags,
2360 uint16_t uErr,
2361 uint64_t uCr2) RT_NOEXCEPT
2362{
2363 NOREF(uErr); NOREF(uCr2);
2364 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2365
2366 /*
2367 * Read the IDT entry.
2368 */
2369 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2370 {
2371 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2372 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2373 }
2374 RTFAR16 Idte;
2375 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2376 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2377 {
2378 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2379 return rcStrict;
2380 }
2381
2382#ifdef LOG_ENABLED
2383 /* If software interrupt, try decode it if logging is enabled and such. */
2384 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2385 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
2386 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
2387#endif
2388
2389 /*
2390 * Push the stack frame.
2391 */
2392 uint8_t bUnmapInfo;
2393 uint16_t *pu16Frame;
2394 uint64_t uNewRsp;
2395 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2396 if (rcStrict != VINF_SUCCESS)
2397 return rcStrict;
2398
2399 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2400#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2401 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2402 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2403 fEfl |= UINT16_C(0xf000);
2404#endif
2405 pu16Frame[2] = (uint16_t)fEfl;
2406 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2407 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2408 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2409 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2410 return rcStrict;
2411
2412 /*
2413 * Load the vector address into cs:ip and make exception specific state
2414 * adjustments.
2415 */
2416 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2417 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2418 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2419 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2420 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2421 pVCpu->cpum.GstCtx.rip = Idte.off;
2422 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2423 IEMMISC_SET_EFL(pVCpu, fEfl);
2424
2425 /** @todo do we actually do this in real mode? */
2426 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2427 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2428
2429 /*
2430 * Deal with debug events that follow the exception and clear inhibit flags.
2431 */
2432 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2433 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
2434 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2435 else
2436 {
2437 Log(("iemRaiseXcptOrIntInRealMode: Raising #DB after %#x; pending=%#x\n",
2438 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
2439 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2440 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
2441 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2442 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2443 return iemRaiseDebugException(pVCpu);
2444 }
2445
2446 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2447 so best leave them alone in case we're in a weird kind of real mode... */
2448
2449 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2450}
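
/*
 * Illustrative sketch (not part of the build): the real-mode dispatch done
 * above, reduced to the 4 byte IVT lookup and the 6 byte FLAGS/CS/IP frame.
 * The SKETCHRM struct is a stand-in for the guest context, the IVT is assumed
 * to be based at 0 (the real code honours IDTR), and a little-endian host is
 * assumed for the memcpy composition.
 */
#if 0
# include <stdint.h>
# include <string.h>

typedef struct SKETCHRM
{
    uint8_t  *pbMem;                        /* flat view of guest memory */
    uint16_t  uCS, uIP, uSS, uSP, uFlags;
} SKETCHRM;

static void sketchRealModeInt(SKETCHRM *pM, uint8_t bVector)
{
    /* Each IVT entry is offset:segment, 4 bytes. */
    uint16_t uNewIP, uNewCS;
    memcpy(&uNewIP, &pM->pbMem[bVector * 4u + 0], sizeof(uNewIP));
    memcpy(&uNewCS, &pM->pbMem[bVector * 4u + 2], sizeof(uNewCS));

    /* Push FLAGS, CS and the return IP (6 bytes, SP wraps as a 16-bit value). */
    uint16_t const uNewSP  = (uint16_t)(pM->uSP - 6);
    uint32_t const offStck = (uint32_t)pM->uSS * 16 + uNewSP;
    memcpy(&pM->pbMem[offStck + 4], &pM->uFlags, 2);
    memcpy(&pM->pbMem[offStck + 2], &pM->uCS,    2);
    memcpy(&pM->pbMem[offStck + 0], &pM->uIP,    2);
    pM->uSP = uNewSP;

    /* Transfer control and mask further interrupts/traps (AC is cleared too on 486+). */
    pM->uCS     = uNewCS;
    pM->uIP     = uNewIP;
    pM->uFlags &= (uint16_t)~((1u << 9) | (1u << 8));   /* clear IF and TF */
}
#endif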
2451
2452
2453/**
2454 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2455 *
2456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2457 * @param pSReg Pointer to the segment register.
2458 */
2459DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2460{
2461 pSReg->Sel = 0;
2462 pSReg->ValidSel = 0;
2463 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2464 {
2465 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2466 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2467 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2468 }
2469 else
2470 {
2471 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2472 /** @todo check this on AMD-V */
2473 pSReg->u64Base = 0;
2474 pSReg->u32Limit = 0;
2475 }
2476}
2477
2478
2479/**
2480 * Loads a segment selector during a task switch in V8086 mode.
2481 *
2482 * @param pSReg Pointer to the segment register.
2483 * @param uSel The selector value to load.
2484 */
2485DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2486{
2487 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2488 pSReg->Sel = uSel;
2489 pSReg->ValidSel = uSel;
2490 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2491 pSReg->u64Base = uSel << 4;
2492 pSReg->u32Limit = 0xffff;
2493 pSReg->Attr.u = 0xf3;
2494}
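
/*
 * Illustrative sketch (not part of the build): how a V8086-mode selector maps
 * onto the hidden segment register parts loaded above - base is the selector
 * shifted left by four, the limit is 64 KiB and the attribute byte describes
 * a present, DPL 3, accessed read/write data segment (0xf3).  SKETCHSEG is a
 * local stand-in, not a CPUM type.
 */
#if 0
# include <stdint.h>

typedef struct SKETCHSEG
{
    uint16_t Sel;
    uint32_t u32Limit;
    uint64_t u64Base;
    uint32_t uAttr;
} SKETCHSEG;

static SKETCHSEG sketchLoadSelV86(uint16_t uSel)
{
    SKETCHSEG Seg;
    Seg.Sel      = uSel;
    Seg.u64Base  = (uint64_t)uSel << 4;     /* e.g. 0xb800 -> 0xb8000 */
    Seg.u32Limit = 0xffff;
    Seg.uAttr    = 0xf3;                    /* P=1, DPL=3, S=1, type=3 (RW data, accessed) */
    return Seg;
}
#endif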
2495
2496
2497/**
2498 * Loads a segment selector during a task switch in protected mode.
2499 *
2500 * In this task switch scenario, we would throw \#TS exceptions rather than
2501 * \#GPs.
2502 *
2503 * @returns VBox strict status code.
2504 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2505 * @param pSReg Pointer to the segment register.
2506 * @param uSel The new selector value.
2507 *
2508 * @remarks This does _not_ handle CS or SS.
2509 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2510 */
2511static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2512{
2513 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2514
2515 /* Null data selector. */
2516 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2517 {
2518 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2519 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2520 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2521 return VINF_SUCCESS;
2522 }
2523
2524 /* Fetch the descriptor. */
2525 IEMSELDESC Desc;
2526 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2527 if (rcStrict != VINF_SUCCESS)
2528 {
2529 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2530 VBOXSTRICTRC_VAL(rcStrict)));
2531 return rcStrict;
2532 }
2533
2534 /* Must be a data segment or readable code segment. */
2535 if ( !Desc.Legacy.Gen.u1DescType
2536 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2537 {
2538 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2539 Desc.Legacy.Gen.u4Type));
2540 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2541 }
2542
2543 /* Check privileges for data segments and non-conforming code segments. */
2544 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2545 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2546 {
2547 /* The RPL and the new CPL must be less than or equal to the DPL. */
2548 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2549 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2550 {
2551 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2552 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2553 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2554 }
2555 }
2556
2557 /* Is it there? */
2558 if (!Desc.Legacy.Gen.u1Present)
2559 {
2560 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2561 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2562 }
2563
2564 /* The base and limit. */
2565 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2566 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2567
2568 /*
2569 * Ok, everything checked out fine. Now set the accessed bit before
2570 * committing the result into the registers.
2571 */
2572 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2573 {
2574 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2575 if (rcStrict != VINF_SUCCESS)
2576 return rcStrict;
2577 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2578 }
2579
2580 /* Commit */
2581 pSReg->Sel = uSel;
2582 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2583 pSReg->u32Limit = cbLimit;
2584 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2585 pSReg->ValidSel = uSel;
2586 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2587 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2588 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2589
2590 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2591 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2592 return VINF_SUCCESS;
2593}
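
/*
 * Illustrative sketch (not part of the build): what X86DESC_BASE() and
 * X86DESC_LIMIT_G() compute for the legacy descriptors fetched above - the
 * base is scattered over three descriptor fields and the 20-bit limit is
 * scaled to 4 KiB pages when the granularity bit is set.  The parameter names
 * mirror the descriptor fields, but the helpers are local inventions.
 */
#if 0
# include <stdbool.h>
# include <stdint.h>

static uint32_t sketchDescBase(uint16_t u16BaseLow, uint8_t u8BaseHigh1, uint8_t u8BaseHigh2)
{
    return (uint32_t)u16BaseLow | ((uint32_t)u8BaseHigh1 << 16) | ((uint32_t)u8BaseHigh2 << 24);
}

static uint32_t sketchDescLimit(uint16_t u16LimitLow, uint8_t u4LimitHigh, bool fGranularity)
{
    uint32_t uLimit = (uint32_t)u16LimitLow | ((uint32_t)(u4LimitHigh & 0xf) << 16);
    if (fGranularity)
        uLimit = (uLimit << 12) | 0xfff;    /* 4 KiB granularity */
    return uLimit;
}
#endif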
2594
2595
2596/**
2597 * Performs a task switch.
2598 *
2599 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2600 * caller is responsible for performing the necessary checks (like DPL, TSS
2601 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2602 * reference for JMP, CALL, IRET.
2603 *
2604 * If the task switch is due to a software interrupt or hardware exception,
2605 * the caller is responsible for validating the TSS selector and descriptor. See
2606 * Intel Instruction reference for INT n.
2607 *
2608 * @returns VBox strict status code.
2609 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2610 * @param enmTaskSwitch The cause of the task switch.
2611 * @param uNextEip The EIP effective after the task switch.
2612 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2613 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2614 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2615 * @param SelTss The TSS selector of the new task.
2616 * @param pNewDescTss Pointer to the new TSS descriptor.
2617 */
2618VBOXSTRICTRC
2619iemTaskSwitch(PVMCPUCC pVCpu,
2620 IEMTASKSWITCH enmTaskSwitch,
2621 uint32_t uNextEip,
2622 uint32_t fFlags,
2623 uint16_t uErr,
2624 uint64_t uCr2,
2625 RTSEL SelTss,
2626 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2627{
2628 Assert(!IEM_IS_REAL_MODE(pVCpu));
2629 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2630 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2631
2632 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2633 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2634 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2635 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2636 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2637
2638 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2639 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2640
2641 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2642 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2643
2644 /* Update CR2 in case it's a page-fault. */
2645 /** @todo This should probably be done much earlier in IEM/PGM. See
2646 * @bugref{5653#c49}. */
2647 if (fFlags & IEM_XCPT_FLAGS_CR2)
2648 pVCpu->cpum.GstCtx.cr2 = uCr2;
2649
2650 /*
2651 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2652 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2653 */
2654 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2655 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2656 if (uNewTssLimit < uNewTssLimitMin)
2657 {
2658 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2659 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2660 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2661 }
2662
2663 /*
2664 * Task switches in VMX non-root mode always cause task-switch VM-exits.
2665 * The new TSS must have been read and validated (DPL, limits etc.) before a
2666 * task-switch VM-exit commences.
2667 *
2668 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2669 */
2670 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2671 {
2672 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2673 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2674 }
2675
2676 /*
2677 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2678 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2679 */
2680 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2681 {
2682 uint64_t const uExitInfo1 = SelTss;
2683 uint64_t uExitInfo2 = uErr;
2684 switch (enmTaskSwitch)
2685 {
2686 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2687 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2688 default: break;
2689 }
2690 if (fFlags & IEM_XCPT_FLAGS_ERR)
2691 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2692 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2693 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2694
2695 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2696 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2697 RT_NOREF2(uExitInfo1, uExitInfo2);
2698 }
2699
2700 /*
2701 * Check the current TSS limit. The last bytes written to the current TSS during the
2702 * task switch are 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2703 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2704 *
2705 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2706 * end up with smaller than "legal" TSS limits.
2707 */
2708 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2709 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2710 if (uCurTssLimit < uCurTssLimitMin)
2711 {
2712 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
2713 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
2714 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2715 }
2716
2717 /*
2718 * Verify that the new TSS can be accessed and map it. Map only the required contents
2719 * and not the entire TSS.
2720 */
2721 uint8_t bUnmapInfoNewTss;
2722 void *pvNewTss;
2723 uint32_t const cbNewTss = uNewTssLimitMin + 1;
2724 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
2725 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2726 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2727 * not perform correct translation if this happens. See Intel spec. 7.2.1
2728 * "Task-State Segment". */
2729 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
2730/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
2731 * Consider wrapping the remainder into a function for simpler cleanup. */
2732 if (rcStrict != VINF_SUCCESS)
2733 {
2734 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
2735 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
2736 return rcStrict;
2737 }
2738
2739 /*
2740 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2741 */
2742 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2743 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2744 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2745 {
2746 uint8_t bUnmapInfoDescCurTss;
2747 PX86DESC pDescCurTss;
2748 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
2749 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2750 if (rcStrict != VINF_SUCCESS)
2751 {
2752 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2753 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2754 return rcStrict;
2755 }
2756
2757 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2758 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
2759 if (rcStrict != VINF_SUCCESS)
2760 {
2761 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2762 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2763 return rcStrict;
2764 }
2765
2766 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2767 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2768 {
2769 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2770 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2771 fEFlags &= ~X86_EFL_NT;
2772 }
2773 }
2774
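    /*
     * Illustrative sketch (not built): the busy-bit handling around this task
     * switch.  The 286 (avail=1/busy=3) and 386 (avail=9/busy=11) TSS
     * descriptor types differ only in bit 1 of the 4-bit type field, which is
     * assumed here to be what X86_SEL_TYPE_SYS_TSS_BUSY_MASK covers (value 2).
     */
#if 0
    uint8_t u4SketchType = 9;               /* 386 TSS, available */
    u4SketchType |= 0x2;                    /* mark busy:  9 -> 11 (new task for JMP/CALL/INT) */
    u4SketchType &= (uint8_t)~0x2;          /* clear busy: 11 -> 9 (old task for JMP/IRET, as above) */
#endif
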
2775 /*
2776 * Save the CPU state into the current TSS.
2777 */
2778 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
2779 if (GCPtrNewTss == GCPtrCurTss)
2780 {
2781 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
2782 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2783 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2784 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2785 pVCpu->cpum.GstCtx.ldtr.Sel));
2786 }
2787 if (fIsNewTss386)
2788 {
2789 /*
2790 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2791 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2792 */
2793 uint8_t bUnmapInfoCurTss32;
2794 void *pvCurTss32;
2795 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
2796 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2797 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2798 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
2799 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2800 if (rcStrict != VINF_SUCCESS)
2801 {
2802 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2803 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2804 return rcStrict;
2805 }
2806
2807 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss, offCurTss + cbCurTss). */
2808 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
2809 pCurTss32->eip = uNextEip;
2810 pCurTss32->eflags = fEFlags;
2811 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
2812 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
2813 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
2814 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
2815 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
2816 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
2817 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
2818 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
2819 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
2820 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2821 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2822 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2823 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2824 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2825
2826 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
2827 if (rcStrict != VINF_SUCCESS)
2828 {
2829 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2830 VBOXSTRICTRC_VAL(rcStrict)));
2831 return rcStrict;
2832 }
2833 }
2834 else
2835 {
2836 /*
2837 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2838 */
2839 uint8_t bUnmapInfoCurTss16;
2840 void *pvCurTss16;
2841 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
2842 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2843 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2844 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
2845 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2846 if (rcStrict != VINF_SUCCESS)
2847 {
2848 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2849 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2850 return rcStrict;
2851 }
2852
2853 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss, offCurTss + cbCurTss). */
2854 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
2855 pCurTss16->ip = uNextEip;
2856 pCurTss16->flags = (uint16_t)fEFlags;
2857 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
2858 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
2859 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
2860 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
2861 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
2862 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
2863 pCurTss16->si = pVCpu->cpum.GstCtx.si;
2864 pCurTss16->di = pVCpu->cpum.GstCtx.di;
2865 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
2866 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2867 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2868 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2869
2870 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
2871 if (rcStrict != VINF_SUCCESS)
2872 {
2873 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2874 VBOXSTRICTRC_VAL(rcStrict)));
2875 return rcStrict;
2876 }
2877 }
2878
2879 /*
2880 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2881 */
2882 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2883 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2884 {
2885 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2886 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
2887 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2888 }
2889
2890 /*
2891 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
2892 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2893 */
2894 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2895 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2896 bool fNewDebugTrap;
2897 if (fIsNewTss386)
2898 {
2899 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
2900 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
2901 uNewEip = pNewTss32->eip;
2902 uNewEflags = pNewTss32->eflags;
2903 uNewEax = pNewTss32->eax;
2904 uNewEcx = pNewTss32->ecx;
2905 uNewEdx = pNewTss32->edx;
2906 uNewEbx = pNewTss32->ebx;
2907 uNewEsp = pNewTss32->esp;
2908 uNewEbp = pNewTss32->ebp;
2909 uNewEsi = pNewTss32->esi;
2910 uNewEdi = pNewTss32->edi;
2911 uNewES = pNewTss32->es;
2912 uNewCS = pNewTss32->cs;
2913 uNewSS = pNewTss32->ss;
2914 uNewDS = pNewTss32->ds;
2915 uNewFS = pNewTss32->fs;
2916 uNewGS = pNewTss32->gs;
2917 uNewLdt = pNewTss32->selLdt;
2918 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
2919 }
2920 else
2921 {
2922 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
2923 uNewCr3 = 0;
2924 uNewEip = pNewTss16->ip;
2925 uNewEflags = pNewTss16->flags;
2926 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
2927 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
2928 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
2929 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
2930 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
2931 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
2932 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
2933 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
2934 uNewES = pNewTss16->es;
2935 uNewCS = pNewTss16->cs;
2936 uNewSS = pNewTss16->ss;
2937 uNewDS = pNewTss16->ds;
2938 uNewFS = 0;
2939 uNewGS = 0;
2940 uNewLdt = pNewTss16->selLdt;
2941 fNewDebugTrap = false;
2942 }
2943
2944 if (GCPtrNewTss == GCPtrCurTss)
2945 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2946 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2947
2948 /*
2949 * We're done accessing the new TSS.
2950 */
2951 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2952 if (rcStrict != VINF_SUCCESS)
2953 {
2954 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2955 return rcStrict;
2956 }
2957
2958 /*
2959 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2960 */
2961 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2962 {
2963 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
2964 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2965 if (rcStrict != VINF_SUCCESS)
2966 {
2967 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2968 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2969 return rcStrict;
2970 }
2971
2972 /* Check that the descriptor indicates the new TSS is available (not busy). */
2973 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2974 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2975 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
2976
2977 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2978 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2979 if (rcStrict != VINF_SUCCESS)
2980 {
2981 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2982 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2983 return rcStrict;
2984 }
2985 }
2986
2987 /*
2988 * From this point on, we're technically in the new task. We will defer exceptions
2989 * until the completion of the task switch but before executing any instructions in the new task.
2990 */
2991 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
2992 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
2993 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2994 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
2995 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
2996 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
2997 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2998
2999 /* Set the busy bit in TR. */
3000 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3001
3002 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
3003 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3004 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3005 {
3006 uNewEflags |= X86_EFL_NT;
3007 }
3008
3009 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
3010 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
3011 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
3012
3013 pVCpu->cpum.GstCtx.eip = uNewEip;
3014 pVCpu->cpum.GstCtx.eax = uNewEax;
3015 pVCpu->cpum.GstCtx.ecx = uNewEcx;
3016 pVCpu->cpum.GstCtx.edx = uNewEdx;
3017 pVCpu->cpum.GstCtx.ebx = uNewEbx;
3018 pVCpu->cpum.GstCtx.esp = uNewEsp;
3019 pVCpu->cpum.GstCtx.ebp = uNewEbp;
3020 pVCpu->cpum.GstCtx.esi = uNewEsi;
3021 pVCpu->cpum.GstCtx.edi = uNewEdi;
3022
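    /* Sanitize the EFLAGS value loaded from the TSS: keep only the live (implemented) bits and force the reserved always-one bit. */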
3023 uNewEflags &= X86_EFL_LIVE_MASK;
3024 uNewEflags |= X86_EFL_RA1_MASK;
3025 IEMMISC_SET_EFL(pVCpu, uNewEflags);
3026
3027 /*
3028 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
3029 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
3030 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
3031 */
3032 pVCpu->cpum.GstCtx.es.Sel = uNewES;
3033 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
3034
3035 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3036 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
3037
3038 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3039 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
3040
3041 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
3042 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
3043
3044 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
3045 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
3046
3047 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
3048 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
3049 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3050
3051 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
3052 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
3053 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
3054 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
3055
3056 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3057 {
3058 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
3059 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
3060 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
3061 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
3062 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
3063 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
3064 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
3065 }
3066
3067 /*
3068 * Switch CR3 for the new task.
3069 */
3070 if ( fIsNewTss386
3071 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
3072 {
3073 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
3074 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
3075 AssertRCSuccessReturn(rc, rc);
3076
3077 /* Inform PGM. */
3078 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
3079 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
3080 AssertRCReturn(rc, rc);
3081 /* ignore informational status codes */
3082
3083 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
3084 }
3085
3086 /*
3087 * Switch LDTR for the new task.
3088 */
3089 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
3090 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
3091 else
3092 {
3093 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
3094
3095 IEMSELDESC DescNewLdt;
3096 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
3097 if (rcStrict != VINF_SUCCESS)
3098 {
3099 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
3100 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
3101 return rcStrict;
3102 }
3103 if ( !DescNewLdt.Legacy.Gen.u1Present
3104 || DescNewLdt.Legacy.Gen.u1DescType
3105 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
3106 {
3107 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
3108 uNewLdt, DescNewLdt.Legacy.u));
3109 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3110 }
3111
3112 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
3113 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3114 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
3115 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
3116 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
3117 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3118 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
3119 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
3120 }
3121
3122 IEMSELDESC DescSS;
3123 if (IEM_IS_V86_MODE(pVCpu))
3124 {
3125 IEM_SET_CPL(pVCpu, 3);
3126 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
3127 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
3128 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
3129 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
3130 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
3131 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
3132
3133 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
3134 DescSS.Legacy.u = 0;
3135 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
3136 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
3137 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
3138 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
3139 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
3140 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
3141 DescSS.Legacy.Gen.u2Dpl = 3;
3142 }
3143 else
3144 {
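        /* The new CPL is taken from the RPL of the CS selector read from the new TSS. */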
3145 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
3146
3147 /*
3148 * Load the stack segment for the new task.
3149 */
3150 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3151 {
3152 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
3153 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3154 }
3155
3156 /* Fetch the descriptor. */
3157 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
3158 if (rcStrict != VINF_SUCCESS)
3159 {
3160 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
3161 VBOXSTRICTRC_VAL(rcStrict)));
3162 return rcStrict;
3163 }
3164
3165 /* SS must be a data segment and writable. */
3166 if ( !DescSS.Legacy.Gen.u1DescType
3167 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3168 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
3169 {
3170 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
3171 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
3172 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3173 }
3174
3175 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3176 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3177 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3178 {
3179 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3180 uNewCpl));
3181 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3182 }
3183
3184 /* Is it there? */
3185 if (!DescSS.Legacy.Gen.u1Present)
3186 {
3187 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3188 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3189 }
3190
3191 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3192 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3193
3194 /* Set the accessed bit before committing the result into SS. */
3195 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3196 {
3197 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3198 if (rcStrict != VINF_SUCCESS)
3199 return rcStrict;
3200 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3201 }
3202
3203 /* Commit SS. */
3204 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3205 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
3206 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3207 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
3208 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
3209 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3210 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
3211
3212 /* CPL has changed, update IEM before loading the rest of the segments. */
3213 IEM_SET_CPL(pVCpu, uNewCpl);
3214
3215 /*
3216 * Load the data segments for the new task.
3217 */
3218 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
3219 if (rcStrict != VINF_SUCCESS)
3220 return rcStrict;
3221 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
3222 if (rcStrict != VINF_SUCCESS)
3223 return rcStrict;
3224 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
3225 if (rcStrict != VINF_SUCCESS)
3226 return rcStrict;
3227 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
3228 if (rcStrict != VINF_SUCCESS)
3229 return rcStrict;
3230
3231 /*
3232 * Load the code segment for the new task.
3233 */
3234 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3235 {
3236 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3237 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3238 }
3239
3240 /* Fetch the descriptor. */
3241 IEMSELDESC DescCS;
3242 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
3243 if (rcStrict != VINF_SUCCESS)
3244 {
3245 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3246 return rcStrict;
3247 }
3248
3249 /* CS must be a code segment. */
3250 if ( !DescCS.Legacy.Gen.u1DescType
3251 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3252 {
3253 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3254 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3255 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3256 }
3257
3258 /* For conforming CS, DPL must be less than or equal to the RPL. */
3259 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3260 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3261 {
3262 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3263 DescCS.Legacy.Gen.u2Dpl));
3264 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3265 }
3266
3267 /* For non-conforming CS, DPL must match RPL. */
3268 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3269 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3270 {
3271 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3272 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3273 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3274 }
3275
3276 /* Is it there? */
3277 if (!DescCS.Legacy.Gen.u1Present)
3278 {
3279 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3280 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3281 }
3282
3283 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3284 u64Base = X86DESC_BASE(&DescCS.Legacy);
3285
3286 /* Set the accessed bit before committing the result into CS. */
3287 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3288 {
3289 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
3290 if (rcStrict != VINF_SUCCESS)
3291 return rcStrict;
3292 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3293 }
3294
3295 /* Commit CS. */
3296 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3297 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
3298 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3299 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
3300 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3301 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3302 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3303 }
3304
3305 /* Make sure the CPU mode is correct. */
3306 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3307 if (fExecNew != pVCpu->iem.s.fExec)
3308 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3309 pVCpu->iem.s.fExec = fExecNew;
3310
3311 /** @todo Debug trap. */
3312 if (fIsNewTss386 && fNewDebugTrap)
3313 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3314
3315 /*
3316 * Construct the error code masks based on what caused this task switch.
3317 * See Intel Instruction reference for INT.
3318 */
3319 uint16_t uExt;
3320 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3321 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3322 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3323 uExt = 1;
3324 else
3325 uExt = 0;
3326
3327 /*
3328 * Push any error code on to the new stack.
3329 */
3330 if (fFlags & IEM_XCPT_FLAGS_ERR)
3331 {
3332 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3333 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
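        /* The error code is pushed as a dword when the new task uses a 32-bit TSS and as a word for a 16-bit TSS. */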
3334 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
3335
3336 /* Check that there is sufficient space on the stack. */
3337 /** @todo Factor out segment limit checking for normal/expand down segments
3338 * into a separate function. */
3339 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3340 {
3341 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3342 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3343 {
3344 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3345 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3346 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3347 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3348 }
3349 }
3350 else
3351 {
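            /* Expand-down segment: valid offsets lie above the limit, up to 0xffff or 0xffffffff depending on the D/B bit, so the bounds checks are inverted. */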
3352 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3353 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3354 {
3355 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3356 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3357 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3358 }
3359 }
3360
3361
3362 if (fIsNewTss386)
3363 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3364 else
3365 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3366 if (rcStrict != VINF_SUCCESS)
3367 {
3368 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3369 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3370 return rcStrict;
3371 }
3372 }
3373
3374 /* Check the new EIP against the new CS limit. */
3375 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3376 {
3377 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3378 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3379 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3380 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3381 }
3382
3383 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3384 pVCpu->cpum.GstCtx.ss.Sel));
3385 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3386}
3387
3388
3389/**
3390 * Implements exceptions and interrupts for protected mode.
3391 *
3392 * @returns VBox strict status code.
3393 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3394 * @param cbInstr The number of bytes to offset rIP by in the return
3395 * address.
3396 * @param u8Vector The interrupt / exception vector number.
3397 * @param fFlags The flags.
3398 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3399 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3400 */
3401static VBOXSTRICTRC
3402iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3403 uint8_t cbInstr,
3404 uint8_t u8Vector,
3405 uint32_t fFlags,
3406 uint16_t uErr,
3407 uint64_t uCr2) RT_NOEXCEPT
3408{
3409 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3410
3411 /*
3412 * Read the IDT entry.
3413 */
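    /* Each protected-mode IDT entry is 8 bytes; make sure the whole descriptor for this vector is within the IDT limit. */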
3414 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3415 {
3416 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3417 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3418 }
3419 X86DESC Idte;
3420 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3421 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3422 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3423 {
3424 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3425 return rcStrict;
3426 }
3427 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3428 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3429 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3430 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3431
3432 /*
3433 * Check the descriptor type, DPL and such.
3434 * ASSUMES this is done in the same order as described for call-gate calls.
3435 */
3436 if (Idte.Gate.u1DescType)
3437 {
3438 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3439 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3440 }
3441 bool fTaskGate = false;
3442 uint8_t f32BitGate = true;
3443 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3444 switch (Idte.Gate.u4Type)
3445 {
3446 case X86_SEL_TYPE_SYS_UNDEFINED:
3447 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3448 case X86_SEL_TYPE_SYS_LDT:
3449 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3450 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3451 case X86_SEL_TYPE_SYS_UNDEFINED2:
3452 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3453 case X86_SEL_TYPE_SYS_UNDEFINED3:
3454 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3455 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3456 case X86_SEL_TYPE_SYS_UNDEFINED4:
3457 {
3458 /** @todo check what actually happens when the type is wrong...
3459 * esp. call gates. */
3460 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3461 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3462 }
3463
3464 case X86_SEL_TYPE_SYS_286_INT_GATE:
3465 f32BitGate = false;
3466 RT_FALL_THRU();
3467 case X86_SEL_TYPE_SYS_386_INT_GATE:
3468 fEflToClear |= X86_EFL_IF;
3469 break;
3470
3471 case X86_SEL_TYPE_SYS_TASK_GATE:
3472 fTaskGate = true;
3473#ifndef IEM_IMPLEMENTS_TASKSWITCH
3474 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3475#endif
3476 break;
3477
3478 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3479 f32BitGate = false;
3480 break;
3481 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3482 break;
3483
3484 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3485 }
3486
3487 /* Check DPL against CPL if applicable. */
3488 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3489 {
3490 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3491 {
3492 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3493 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3494 }
3495 }
3496
3497 /* Is it there? */
3498 if (!Idte.Gate.u1Present)
3499 {
3500 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3501 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3502 }
3503
3504 /* Is it a task-gate? */
3505 if (fTaskGate)
3506 {
3507 /*
3508 * Construct the error code masks based on what caused this task switch.
3509 * See Intel Instruction reference for INT.
3510 */
3511 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3512 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3513 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3514 RTSEL SelTss = Idte.Gate.u16Sel;
3515
3516 /*
3517 * Fetch the TSS descriptor in the GDT.
3518 */
3519 IEMSELDESC DescTSS;
3520 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3521 if (rcStrict != VINF_SUCCESS)
3522 {
3523 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3524 VBOXSTRICTRC_VAL(rcStrict)));
3525 return rcStrict;
3526 }
3527
3528 /* The TSS descriptor must be a system segment and be available (not busy). */
3529 if ( DescTSS.Legacy.Gen.u1DescType
3530 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3531 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3532 {
3533 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3534 u8Vector, SelTss, DescTSS.Legacy.au64));
3535 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3536 }
3537
3538 /* The TSS must be present. */
3539 if (!DescTSS.Legacy.Gen.u1Present)
3540 {
3541 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3542 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3543 }
3544
3545 /* Do the actual task switch. */
3546 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3547 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3548 fFlags, uErr, uCr2, SelTss, &DescTSS);
3549 }
3550
3551 /* A null CS is bad. */
3552 RTSEL NewCS = Idte.Gate.u16Sel;
3553 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3554 {
3555 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3556 return iemRaiseGeneralProtectionFault0(pVCpu);
3557 }
3558
3559 /* Fetch the descriptor for the new CS. */
3560 IEMSELDESC DescCS;
3561 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3562 if (rcStrict != VINF_SUCCESS)
3563 {
3564 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3565 return rcStrict;
3566 }
3567
3568 /* Must be a code segment. */
3569 if (!DescCS.Legacy.Gen.u1DescType)
3570 {
3571 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3572 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3573 }
3574 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3575 {
3576 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3577 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3578 }
3579
3580 /* Don't allow lowering the privilege level. */
3581 /** @todo Does the lowering of privileges apply to software interrupts
3582 * only? This has bearings on the more-privileged or
3583 * same-privilege stack behavior further down. A testcase would
3584 * be nice. */
3585 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3586 {
3587 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3588 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3589 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3590 }
3591
3592 /* Make sure the selector is present. */
3593 if (!DescCS.Legacy.Gen.u1Present)
3594 {
3595 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3596 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3597 }
3598
3599#ifdef LOG_ENABLED
3600 /* If this is a software interrupt, try to decode it if logging is enabled and such. */
3601 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3602 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3603 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3604#endif
3605
3606 /* Check the new EIP against the new CS limit. */
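    /* 286 interrupt/trap gates only carry a 16-bit offset, while 386 gates combine the low and high offset words into a 32-bit EIP. */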
3607 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3608 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3609 ? Idte.Gate.u16OffsetLow
3610 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3611 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3612 if (uNewEip > cbLimitCS)
3613 {
3614 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3615 u8Vector, uNewEip, cbLimitCS, NewCS));
3616 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3617 }
3618 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3619
3620 /* Calc the flag image to push. */
3621 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3622 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3623 fEfl &= ~X86_EFL_RF;
3624 else
3625 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3626
3627 /* From V8086 mode only go to CPL 0. */
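    /* A conforming CS keeps the current CPL; for a non-conforming CS the handler runs at the descriptor's DPL. */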
3628 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3629 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3630 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3631 {
3632 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3633 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3634 }
3635
3636 /*
3637 * If the privilege level changes, we need to get a new stack from the TSS.
3638 * This in turns means validating the new SS and ESP...
3639 */
3640 if (uNewCpl != IEM_GET_CPL(pVCpu))
3641 {
3642 RTSEL NewSS;
3643 uint32_t uNewEsp;
3644 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3645 if (rcStrict != VINF_SUCCESS)
3646 return rcStrict;
3647
3648 IEMSELDESC DescSS;
3649 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3650 if (rcStrict != VINF_SUCCESS)
3651 return rcStrict;
3652 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3653 if (!DescSS.Legacy.Gen.u1DefBig)
3654 {
3655 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3656 uNewEsp = (uint16_t)uNewEsp;
3657 }
3658
3659 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3660
3661 /* Check that there is sufficient space for the stack frame. */
3662 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
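        /* CPL-change frame: SS:ESP, EFLAGS, CS and EIP (plus the error code, if any); V86-mode exits additionally push GS, FS, DS and ES. The shift by f32BitGate doubles the entry size for 32-bit gates. */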
3663 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3664 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3665 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3666
3667 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3668 {
3669 if ( uNewEsp - 1 > cbLimitSS
3670 || uNewEsp < cbStackFrame)
3671 {
3672 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3673 u8Vector, NewSS, uNewEsp, cbStackFrame));
3674 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3675 }
3676 }
3677 else
3678 {
3679 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3680 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3681 {
3682 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3683 u8Vector, NewSS, uNewEsp, cbStackFrame));
3684 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3685 }
3686 }
3687
3688 /*
3689 * Start making changes.
3690 */
3691
3692 /* Set the new CPL so that stack accesses use it. */
3693 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3694 IEM_SET_CPL(pVCpu, uNewCpl);
3695
3696 /* Create the stack frame. */
3697 uint8_t bUnmapInfoStackFrame;
3698 RTPTRUNION uStackFrame;
3699 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3700 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3701 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3702 if (rcStrict != VINF_SUCCESS)
3703 return rcStrict;
3704 if (f32BitGate)
3705 {
3706 if (fFlags & IEM_XCPT_FLAGS_ERR)
3707 *uStackFrame.pu32++ = uErr;
3708 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3709 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3710 uStackFrame.pu32[2] = fEfl;
3711 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3712 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3713 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3714 if (fEfl & X86_EFL_VM)
3715 {
3716 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3717 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3718 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3719 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3720 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3721 }
3722 }
3723 else
3724 {
3725 if (fFlags & IEM_XCPT_FLAGS_ERR)
3726 *uStackFrame.pu16++ = uErr;
3727 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3728 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3729 uStackFrame.pu16[2] = fEfl;
3730 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3731 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3732 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3733 if (fEfl & X86_EFL_VM)
3734 {
3735 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3736 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3737 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3738 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3739 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3740 }
3741 }
3742 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3743 if (rcStrict != VINF_SUCCESS)
3744 return rcStrict;
3745
3746 /* Mark the selectors 'accessed' (hope this is the correct time). */
3747 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3748 * after pushing the stack frame? (Write protect the gdt + stack to
3749 * find out.) */
3750 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3751 {
3752 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3753 if (rcStrict != VINF_SUCCESS)
3754 return rcStrict;
3755 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3756 }
3757
3758 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3759 {
3760 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3761 if (rcStrict != VINF_SUCCESS)
3762 return rcStrict;
3763 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3764 }
3765
3766 /*
3767 * Start committing the register changes (joins with the DPL=CPL branch).
3768 */
3769 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3770 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3771 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3772 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3773 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3774 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3775 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3776 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3777 * SP is loaded).
3778 * Need to check the other combinations too:
3779 * - 16-bit TSS, 32-bit handler
3780 * - 32-bit TSS, 16-bit handler */
3781 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3782 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3783 else
3784 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3785
3786 if (fEfl & X86_EFL_VM)
3787 {
3788 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3789 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3790 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3791 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3792 }
3793 }
3794 /*
3795 * Same privilege, no stack change and smaller stack frame.
3796 */
3797 else
3798 {
3799 uint64_t uNewRsp;
3800 uint8_t bUnmapInfoStackFrame;
3801 RTPTRUNION uStackFrame;
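        /* Same-CPL frame: just EIP/IP, CS and FLAGS, plus the optional error code; doubled in size for 32-bit gates. */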
3802 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3803 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
3804 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
3805 if (rcStrict != VINF_SUCCESS)
3806 return rcStrict;
3807
3808 if (f32BitGate)
3809 {
3810 if (fFlags & IEM_XCPT_FLAGS_ERR)
3811 *uStackFrame.pu32++ = uErr;
3812 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3813 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3814 uStackFrame.pu32[2] = fEfl;
3815 }
3816 else
3817 {
3818 if (fFlags & IEM_XCPT_FLAGS_ERR)
3819 *uStackFrame.pu16++ = uErr;
3820 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3821 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3822 uStackFrame.pu16[2] = fEfl;
3823 }
3824 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
3825 if (rcStrict != VINF_SUCCESS)
3826 return rcStrict;
3827
3828 /* Mark the CS selector as 'accessed'. */
3829 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3830 {
3831 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3832 if (rcStrict != VINF_SUCCESS)
3833 return rcStrict;
3834 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3835 }
3836
3837 /*
3838 * Start committing the register changes (joins with the other branch).
3839 */
3840 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3841 }
3842
3843 /* ... register committing continues. */
3844 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3845 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3846 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3847 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3848 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3849 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3850
3851 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3852 fEfl &= ~fEflToClear;
3853 IEMMISC_SET_EFL(pVCpu, fEfl);
3854
3855 if (fFlags & IEM_XCPT_FLAGS_CR2)
3856 pVCpu->cpum.GstCtx.cr2 = uCr2;
3857
3858 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3859 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3860
3861 /* Make sure the execution flags are correct. */
3862 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3863 if (fExecNew != pVCpu->iem.s.fExec)
3864 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3865 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3866 pVCpu->iem.s.fExec = fExecNew;
3867 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3868
3869 /*
3870 * Deal with debug events that follow the exception and clear inhibit flags.
3871 */
3872 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3873 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
3874 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
3875 else
3876 {
3877 Log(("iemRaiseXcptOrIntInProtMode: Raising #DB after %#x; pending=%#x\n",
3878 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
3879 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
3880 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
3881 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
3882 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
3883 return iemRaiseDebugException(pVCpu);
3884 }
3885
3886 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3887}
3888
3889
3890/**
3891 * Implements exceptions and interrupts for long mode.
3892 *
3893 * @returns VBox strict status code.
3894 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3895 * @param cbInstr The number of bytes to offset rIP by in the return
3896 * address.
3897 * @param u8Vector The interrupt / exception vector number.
3898 * @param fFlags The flags.
3899 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3900 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3901 */
3902static VBOXSTRICTRC
3903iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3904 uint8_t cbInstr,
3905 uint8_t u8Vector,
3906 uint32_t fFlags,
3907 uint16_t uErr,
3908 uint64_t uCr2) RT_NOEXCEPT
3909{
3910 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3911
3912 /*
3913 * Read the IDT entry.
3914 */
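    /* Each long-mode IDT entry is 16 bytes, hence the shift by 4. */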
3915 uint16_t offIdt = (uint16_t)u8Vector << 4;
3916 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3917 {
3918 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3919 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3920 }
3921 X86DESC64 Idte;
3922#ifdef _MSC_VER /* Shut up silly compiler warning. */
3923 Idte.au64[0] = 0;
3924 Idte.au64[1] = 0;
3925#endif
3926 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3927 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3928 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3929 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3930 {
3931 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3932 return rcStrict;
3933 }
3934 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3935 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3936 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3937
3938 /*
3939 * Check the descriptor type, DPL and such.
3940 * ASSUMES this is done in the same order as described for call-gate calls.
3941 */
3942 if (Idte.Gate.u1DescType)
3943 {
3944 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3945 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3946 }
3947 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3948 switch (Idte.Gate.u4Type)
3949 {
3950 case AMD64_SEL_TYPE_SYS_INT_GATE:
3951 fEflToClear |= X86_EFL_IF;
3952 break;
3953 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3954 break;
3955
3956 default:
3957 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3958 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3959 }
3960
3961 /* Check DPL against CPL if applicable. */
3962 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3963 {
3964 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3965 {
3966 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3967 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3968 }
3969 }
3970
3971 /* Is it there? */
3972 if (!Idte.Gate.u1Present)
3973 {
3974 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3975 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3976 }
3977
3978 /* A null CS is bad. */
3979 RTSEL NewCS = Idte.Gate.u16Sel;
3980 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3981 {
3982 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3983 return iemRaiseGeneralProtectionFault0(pVCpu);
3984 }
3985
3986 /* Fetch the descriptor for the new CS. */
3987 IEMSELDESC DescCS;
3988 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3989 if (rcStrict != VINF_SUCCESS)
3990 {
3991 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3992 return rcStrict;
3993 }
3994
3995 /* Must be a 64-bit code segment. */
3996 if (!DescCS.Long.Gen.u1DescType)
3997 {
3998 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3999 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4000 }
4001 if ( !DescCS.Long.Gen.u1Long
4002 || DescCS.Long.Gen.u1DefBig
4003 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
4004 {
4005 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
4006 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
4007 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4008 }
4009
4010 /* Don't allow lowering the privilege level. For non-conforming CS
4011 selectors, the CS.DPL sets the privilege level the trap/interrupt
4012 handler runs at. For conforming CS selectors, the CPL remains
4013 unchanged, but the CS.DPL must be <= CPL. */
4014 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4015 * when CPU in Ring-0. Result \#GP? */
4016 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
4017 {
4018 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4019 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
4020 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4021 }
4022
4023
4024 /* Make sure the selector is present. */
4025 if (!DescCS.Legacy.Gen.u1Present)
4026 {
4027 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4028 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4029 }
4030
4031 /* Check that the new RIP is canonical. */
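    /* The 64-bit gate offset is split over three fields: bits 0-15, 16-31 and 32-63. */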
4032 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
4033 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
4034 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
4035 if (!IEM_IS_CANONICAL(uNewRip))
4036 {
4037 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
4038 return iemRaiseGeneralProtectionFault0(pVCpu);
4039 }
4040
4041 /*
4042 * If the privilege level changes or if the IST isn't zero, we need to get
4043 * a new stack from the TSS.
4044 */
4045 uint64_t uNewRsp;
4046 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4047 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
4048 if ( uNewCpl != IEM_GET_CPL(pVCpu)
4049 || Idte.Gate.u3IST != 0)
4050 {
4051 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
4052 if (rcStrict != VINF_SUCCESS)
4053 return rcStrict;
4054 }
4055 else
4056 uNewRsp = pVCpu->cpum.GstCtx.rsp;
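    /* The 64-bit interrupt stack frame is always pushed on a 16-byte aligned boundary. */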
4057 uNewRsp &= ~(uint64_t)0xf;
4058
4059 /*
4060 * Calc the flag image to push.
4061 */
4062 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4063 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4064 fEfl &= ~X86_EFL_RF;
4065 else
4066 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4067
4068 /*
4069 * Start making changes.
4070 */
4071 /* Set the new CPL so that stack accesses use it. */
4072 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
4073 IEM_SET_CPL(pVCpu, uNewCpl);
4074/** @todo Setting CPL this early seems wrong as it would affect any errors we
4075 * raise while accessing the stack and (?) GDT/LDT... */
4076
4077 /* Create the stack frame. */
4078 uint8_t bUnmapInfoStackFrame;
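    /* The long-mode frame always contains SS, RSP, RFLAGS, CS and RIP (5 qwords), plus an optional error code. */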
4079 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
4080 RTPTRUNION uStackFrame;
4081 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
4082 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
4083 if (rcStrict != VINF_SUCCESS)
4084 return rcStrict;
4085
4086 if (fFlags & IEM_XCPT_FLAGS_ERR)
4087 *uStackFrame.pu64++ = uErr;
4088 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
4089 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
4090 uStackFrame.pu64[2] = fEfl;
4091 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
4092 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
4093 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
4094 if (rcStrict != VINF_SUCCESS)
4095 return rcStrict;
4096
4097 /* Mark the CS selector 'accessed' (hope this is the correct time). */
4098 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4099 * after pushing the stack frame? (Write protect the gdt + stack to
4100 * find out.) */
4101 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4102 {
4103 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4104 if (rcStrict != VINF_SUCCESS)
4105 return rcStrict;
4106 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4107 }
4108
4109 /*
4110 * Start committing the register changes.
4111 */
4112 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
4113 * hidden registers when interrupting 32-bit or 16-bit code! */
4114 if (uNewCpl != uOldCpl)
4115 {
4116 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
4117 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
4118 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4119 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4120 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4121 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
4122 }
4123 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
4124 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4125 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4126 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4127 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
4128 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4129 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4130 pVCpu->cpum.GstCtx.rip = uNewRip;
4131
4132 fEfl &= ~fEflToClear;
4133 IEMMISC_SET_EFL(pVCpu, fEfl);
4134
4135 if (fFlags & IEM_XCPT_FLAGS_CR2)
4136 pVCpu->cpum.GstCtx.cr2 = uCr2;
4137
4138 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4139 iemRaiseXcptAdjustState(pVCpu, u8Vector);
4140
4141 iemRecalcExecModeAndCplAndAcFlags(pVCpu);
4142
4143 /*
4144 * Deal with debug events that follow the exception and clear inhibit flags.
4145 */
4146 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4147 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
4148 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4149 else
4150 {
4151 Log(("iemRaiseXcptOrIntInLongMode: Raising #DB after %#x; pending=%#x\n",
4152 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
4153 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
4154 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4155 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
4156 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4157 return iemRaiseDebugException(pVCpu);
4158 }
4159
4160 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4161}
4162
4163
4164/**
4165 * Implements exceptions and interrupts.
4166 *
4167 * All exceptions and interrupts go through this function!
4168 *
4169 * @returns VBox strict status code.
4170 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4171 * @param cbInstr The number of bytes to offset rIP by in the return
4172 * address.
4173 * @param u8Vector The interrupt / exception vector number.
4174 * @param fFlags The flags.
4175 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4176 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4177 */
4178VBOXSTRICTRC
4179iemRaiseXcptOrInt(PVMCPUCC pVCpu,
4180 uint8_t cbInstr,
4181 uint8_t u8Vector,
4182 uint32_t fFlags,
4183 uint16_t uErr,
4184 uint64_t uCr2) RT_NOEXCEPT
4185{
4186 /*
4187 * Get all the state that we might need here.
4188 */
4189 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4190 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4191
4192#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
4193 /*
4194 * Flush prefetch buffer
4195 */
4196 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4197#endif
4198
4199 /*
4200 * Perform the V8086 IOPL check and upgrade the fault without nesting.
4201 */
4202 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
4203 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
4204 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
4205 | IEM_XCPT_FLAGS_BP_INSTR
4206 | IEM_XCPT_FLAGS_ICEBP_INSTR
4207 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
4208 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
4209 {
4210 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
4211 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4212 u8Vector = X86_XCPT_GP;
4213 uErr = 0;
4214 }
4215
4216 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
4217#ifdef DBGFTRACE_ENABLED
4218 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
4219 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
4220 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
4221#endif
4222
4223 /*
4224 * Check if DBGF wants to intercept the exception.
4225 */
4226 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
4227 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
4228 { /* likely */ }
4229 else
4230 {
4231 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
4232 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
4233 if (rcStrict != VINF_SUCCESS)
4234 return rcStrict;
4235 }
4236
4237 /*
4238 * Evaluate whether NMI blocking should be in effect.
4239 * Normally, NMI blocking is in effect whenever we inject an NMI.
4240 */
4241 bool fBlockNmi = u8Vector == X86_XCPT_NMI
4242 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
4243
4244#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4245 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4246 {
4247 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
4248 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4249 return rcStrict0;
4250
4251 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
4252 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
4253 {
4254 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
4255 fBlockNmi = false;
4256 }
4257 }
4258#endif
4259
4260#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4261 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
4262 {
4263 /*
4264 * If the event is being injected as part of VMRUN, it isn't subject to event
4265 * intercepts in the nested-guest. However, secondary exceptions that occur
4266 * during injection of any event -are- subject to exception intercepts.
4267 *
4268 * See AMD spec. 15.20 "Event Injection".
4269 */
4270 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
4271 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
4272 else
4273 {
4274 /*
4275 * Check and handle if the event being raised is intercepted.
4276 */
4277 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4278 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
4279 return rcStrict0;
4280 }
4281 }
4282#endif
4283
4284 /*
4285 * Set NMI blocking if necessary.
4286 */
4287 if (fBlockNmi)
4288 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
4289
4290 /*
4291 * Do recursion accounting.
4292 */
4293 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
4294 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
4295 if (pVCpu->iem.s.cXcptRecursions == 0)
4296 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
4297 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
4298 else
4299 {
4300 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
4301 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
4302 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
4303
4304 if (pVCpu->iem.s.cXcptRecursions >= 4)
4305 {
4306#ifdef DEBUG_bird
4307 AssertFailed();
4308#endif
4309 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4310 }
4311
4312 /*
4313 * Evaluate the sequence of recurring events.
4314 */
4315 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
4316 NULL /* pXcptRaiseInfo */);
4317 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
4318 { /* likely */ }
4319 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
4320 {
4321 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
4322 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4323 u8Vector = X86_XCPT_DF;
4324 uErr = 0;
4325#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4326 /* VMX nested-guest #DF intercept needs to be checked here. */
4327 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4328 {
4329 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
4330 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4331 return rcStrict0;
4332 }
4333#endif
4334 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
4335 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
4336 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4337 }
4338 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4339 {
4340 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4341 return iemInitiateCpuShutdown(pVCpu);
4342 }
4343 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4344 {
4345 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4346 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4347 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4348 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4349 return VERR_EM_GUEST_CPU_HANG;
4350 }
4351 else
4352 {
4353 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4354 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4355 return VERR_IEM_IPE_9;
4356 }
4357
4358 /*
4359 * The 'EXT' bit is set when an exception occurs during delivery of an external
4360 * event (such as an interrupt or an earlier exception)[1]. The privileged software
4361 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
4362 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
4363 *
4364 * [1] - Intel spec. 6.13 "Error Code"
4365 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4366 * [3] - Intel Instruction reference for INT n.
4367 */
4368 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4369 && (fFlags & IEM_XCPT_FLAGS_ERR)
4370 && u8Vector != X86_XCPT_PF
4371 && u8Vector != X86_XCPT_DF)
4372 {
4373 uErr |= X86_TRAP_ERR_EXTERNAL;
4374 }
4375 }
4376
4377 pVCpu->iem.s.cXcptRecursions++;
4378 pVCpu->iem.s.uCurXcpt = u8Vector;
4379 pVCpu->iem.s.fCurXcpt = fFlags;
4380 pVCpu->iem.s.uCurXcptErr = uErr;
4381 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4382
4383 /*
4384 * Extensive logging.
4385 */
4386#if defined(LOG_ENABLED) && defined(IN_RING3)
4387 if (LogIs3Enabled())
4388 {
4389 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4390 char szRegs[4096];
4391 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4392 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4393 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4394 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4395 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4396 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4397 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4398 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4399 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4400 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4401 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4402 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4403 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4404 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4405 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4406 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4407 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4408 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4409 " efer=%016VR{efer}\n"
4410 " pat=%016VR{pat}\n"
4411 " sf_mask=%016VR{sf_mask}\n"
4412 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4413 " lstar=%016VR{lstar}\n"
4414 " star=%016VR{star} cstar=%016VR{cstar}\n"
4415 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4416 );
4417
4418 char szInstr[256];
4419 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4420 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4421 szInstr, sizeof(szInstr), NULL);
4422 Log3(("%s%s\n", szRegs, szInstr));
4423 }
4424#endif /* LOG_ENABLED && IN_RING3 */
4425
4426 /*
4427 * Stats.
4428 */
4429 uint64_t const uTimestamp = ASMReadTSC();
4430 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4431 {
4432 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4433 EMHistoryAddExit(pVCpu,
4434 fFlags & IEM_XCPT_FLAGS_T_EXT_INT
4435 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
4436 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
4437 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4438 }
4439 else
4440 {
4441 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
4442 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4443 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4444 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4445 if (fFlags & IEM_XCPT_FLAGS_ERR)
4446 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
4447 if (fFlags & IEM_XCPT_FLAGS_CR2)
4448 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
4449 }
4450
4451 /*
4452 * Hack alert! Convert incoming debug events to silent ones on Intel.
4453 * See the dbg+inhibit+ringxfer test in bs3-cpu-weird-1.
4454 */
4455 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4456 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4457 || !IEM_IS_GUEST_CPU_INTEL(pVCpu))
4458 { /* ignore */ }
4459 else
4460 {
4461 Log(("iemRaiseXcptOrInt: Converting pending %#x debug events to a silent one (intel hack); vec=%#x\n",
4462 pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK, u8Vector));
4463 pVCpu->cpum.GstCtx.eflags.uBoth = (pVCpu->cpum.GstCtx.eflags.uBoth & ~CPUMCTX_DBG_HIT_DRX_MASK)
4464 | CPUMCTX_DBG_HIT_DRX_SILENT;
4465 }
4466
4467 /*
4468 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4469 * to ensure that a stale TLB or paging cache entry will only cause one
4470 * spurious #PF.
4471 */
4472 if ( u8Vector == X86_XCPT_PF
4473 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4474 IEMTlbInvalidatePage(pVCpu, uCr2);
4475
4476 /*
4477 * Call the mode specific worker function.
4478 */
4479 VBOXSTRICTRC rcStrict;
4480 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4481 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4482 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4483 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4484 else
4485 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4486
4487 /* Flush the prefetch buffer. */
4488 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4489
4490 /*
4491 * Unwind.
4492 */
4493 pVCpu->iem.s.cXcptRecursions--;
4494 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4495 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4496 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4497 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4498 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4499 return rcStrict;
4500}
4501
4502#ifdef IEM_WITH_SETJMP
4503/**
4504 * See iemRaiseXcptOrInt. Will not return.
4505 */
4506DECL_NO_RETURN(void)
4507iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4508 uint8_t cbInstr,
4509 uint8_t u8Vector,
4510 uint32_t fFlags,
4511 uint16_t uErr,
4512 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4513{
4514 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4515 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4516}
4517#endif
4518
4519
4520/** \#DE - 00. */
4521VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4522{
4523 if (GCMIsInterceptingXcptDE(pVCpu))
4524 {
4525 int rc = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx);
4526 if (rc == VINF_SUCCESS)
4527 {
4528 Log(("iemRaiseDivideError: Restarting instruction because of GCMXcptDE\n"));
4529 return VINF_IEM_RAISED_XCPT; /* must return non-zero status here to cause an instruction restart */
4530 }
4531 }
4532 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4533}
4534
4535
4536#ifdef IEM_WITH_SETJMP
4537/** \#DE - 00. */
4538DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4539{
4540 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4541}
4542#endif
4543
4544
4545/** \#DB - 01.
4546 * @note This automatically clears DR7.GD. */
4547VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4548{
4549 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4550 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4551 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4552}
4553
4554
4555/** \#BR - 05. */
4556VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4557{
4558 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4559}
4560
4561
4562/** \#UD - 06. */
4563VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4564{
4565 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4566}
4567
4568
4569#ifdef IEM_WITH_SETJMP
4570/** \#UD - 06. */
4571DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4572{
4573 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4574}
4575#endif
4576
4577
4578/** \#NM - 07. */
4579VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4580{
4581 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4582}
4583
4584
4585#ifdef IEM_WITH_SETJMP
4586/** \#NM - 07. */
4587DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4588{
4589 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4590}
4591#endif
4592
4593
4594/** \#TS(err) - 0a. */
4595VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4596{
4597 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4598}
4599
4600
4601/** \#TS(tr) - 0a. */
4602VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4603{
4604 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4605 pVCpu->cpum.GstCtx.tr.Sel, 0);
4606}
4607
4608
4609/** \#TS(0) - 0a. */
4610VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4611{
4612 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4613 0, 0);
4614}
4615
4616
4617/** \#TS(err) - 0a. */
4618VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4619{
4620 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4621 uSel & X86_SEL_MASK_OFF_RPL, 0);
4622}
4623
4624
4625/** \#NP(err) - 0b. */
4626VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4627{
4628 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4629}
4630
4631
4632/** \#NP(sel) - 0b. */
4633VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4634{
4635 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4636 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4637 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4638 uSel & ~X86_SEL_RPL, 0);
4639}
4640
4641
4642/** \#SS(seg) - 0c. */
4643VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4644{
4645 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4646 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4647 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4648 uSel & ~X86_SEL_RPL, 0);
4649}
4650
4651
4652/** \#SS(err) - 0c. */
4653VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4654{
4655 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4656 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4657 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4658}
4659
4660
4661/** \#GP(n) - 0d. */
4662VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4663{
4664 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4665 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4666}
4667
4668
4669/** \#GP(0) - 0d. */
4670VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4671{
4672 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4673 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4674}
4675
4676#ifdef IEM_WITH_SETJMP
4677/** \#GP(0) - 0d. */
4678DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4679{
4680 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4681 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4682}
4683#endif
4684
4685
4686/** \#GP(sel) - 0d. */
4687VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4688{
4689 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4690 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4691 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4692 Sel & ~X86_SEL_RPL, 0);
4693}
4694
4695
4696/** \#GP(0) - 0d. */
4697VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4698{
4699 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4700 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4701}
4702
4703
4704/** \#GP(sel) - 0d. */
4705VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4706{
4707 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4708 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4709 NOREF(iSegReg); NOREF(fAccess);
4710 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4711 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4712}
4713
4714#ifdef IEM_WITH_SETJMP
4715/** \#GP(sel) - 0d, longjmp. */
4716DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4717{
4718 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4719 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4720 NOREF(iSegReg); NOREF(fAccess);
4721 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4722 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4723}
4724#endif
4725
4726/** \#GP(sel) - 0d. */
4727VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4728{
4729 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4730 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4731 NOREF(Sel);
4732 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4733}
4734
4735#ifdef IEM_WITH_SETJMP
4736/** \#GP(sel) - 0d, longjmp. */
4737DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4738{
4739 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4740 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4741 NOREF(Sel);
4742 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4743}
4744#endif
4745
4746
4747/** \#GP(sel) - 0d. */
4748VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4749{
4750 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4751 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4752 NOREF(iSegReg); NOREF(fAccess);
4753 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4754}
4755
4756#ifdef IEM_WITH_SETJMP
4757/** \#GP(sel) - 0d, longjmp. */
4758DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4759{
4760 NOREF(iSegReg); NOREF(fAccess);
4761 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4762}
4763#endif
4764
4765
4766/** \#PF(n) - 0e. */
4767VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4768{
4769 uint16_t uErr;
4770 switch (rc)
4771 {
4772 case VERR_PAGE_NOT_PRESENT:
4773 case VERR_PAGE_TABLE_NOT_PRESENT:
4774 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4775 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4776 uErr = 0;
4777 break;
4778
4779 case VERR_RESERVED_PAGE_TABLE_BITS:
4780 uErr = X86_TRAP_PF_P | X86_TRAP_PF_RSVD;
4781 break;
4782
4783 default:
4784 AssertMsgFailed(("%Rrc\n", rc));
4785 RT_FALL_THRU();
4786 case VERR_ACCESS_DENIED:
4787 uErr = X86_TRAP_PF_P;
4788 break;
4789 }
4790
4791 if (IEM_GET_CPL(pVCpu) == 3)
4792 uErr |= X86_TRAP_PF_US;
4793
4794 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4795 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4796 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4797 uErr |= X86_TRAP_PF_ID;
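 /* E.g. (hypothetical values): a CPL-3 data write to a present but read-only page
    (rc=VERR_ACCESS_DENIED) yields uErr = P | US | RW = 0x007. */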
4798
4799#if 0 /* This is so much non-sense, really. Why was it done like that? */
4800 /* Note! RW access callers reporting a WRITE protection fault, will clear
4801 the READ flag before calling. So, read-modify-write accesses (RW)
4802 can safely be reported as READ faults. */
4803 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4804 uErr |= X86_TRAP_PF_RW;
4805#else
4806 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4807 {
4808 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4809 /// (regardless of outcome of the comparison in the latter case).
4810 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4811 uErr |= X86_TRAP_PF_RW;
4812 }
4813#endif
4814
4815 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4816 of the memory operand rather than at the start of it. (Not sure what
4817 happens if it crosses a page boundary.) The current heuristic for
4818 this is to report the #PF for the last byte if the access is more than
4819 64 bytes. This is probably not correct, but we can work that out later;
4820 the main objective now is to get FXSAVE to work like real hardware and
4821 make bs3-cpu-basic2 work. */
4822 if (cbAccess <= 64)
4823 { /* likely */ }
4824 else
4825 GCPtrWhere += cbAccess - 1;
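 /* E.g. (hypothetical values): a 512-byte FXSAVE at GCPtrWhere=0x2000 that faults
    will report CR2 = 0x21ff rather than 0x2000. */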
4826
4827 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4828 uErr, GCPtrWhere);
4829}
4830
4831#ifdef IEM_WITH_SETJMP
4832/** \#PF(n) - 0e, longjmp. */
4833DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4834 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4835{
4836 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4837}
4838#endif
4839
4840
4841/** \#MF(0) - 10. */
4842VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4843{
4844 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4845 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4846
4847 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4848 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4849 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4850}
4851
4852#ifdef IEM_WITH_SETJMP
4853/** \#MF(0) - 10, longjmp. */
4854DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4855{
4856 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));
4857}
4858#endif
4859
4860
4861/** \#AC(0) - 11. */
4862VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4863{
4864 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4865}
4866
4867#ifdef IEM_WITH_SETJMP
4868/** \#AC(0) - 11, longjmp. */
4869DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4870{
4871 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4872}
4873#endif
4874
4875
4876/** \#XF(0)/\#XM(0) - 19. */
4877VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4878{
4879 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4880}
4881
4882
4883#ifdef IEM_WITH_SETJMP
4884 /** \#XF(0)/\#XM(0) - 19, longjmp. */
4885DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4886{
4887 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));
4888}
4889#endif
4890
4891
4892/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4893IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4894{
4895 NOREF(cbInstr);
4896 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4897}
4898
4899
4900/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4901IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4902{
4903 NOREF(cbInstr);
4904 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4905}
4906
4907
4908/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4909IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4910{
4911 NOREF(cbInstr);
4912 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4913}
4914
4915
4916/** @} */
4917
4918/** @name Common opcode decoders.
4919 * @{
4920 */
4921//#include <iprt/mem.h>
4922
4923/**
4924 * Used to add extra details about a stub case.
4925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4926 */
4927void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4928{
4929#if defined(LOG_ENABLED) && defined(IN_RING3)
4930 PVM pVM = pVCpu->CTX_SUFF(pVM);
4931 char szRegs[4096];
4932 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4933 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4934 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4935 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4936 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4937 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4938 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4939 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4940 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4941 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4942 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4943 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4944 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4945 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4946 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4947 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4948 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4949 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4950 " efer=%016VR{efer}\n"
4951 " pat=%016VR{pat}\n"
4952 " sf_mask=%016VR{sf_mask}\n"
4953 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4954 " lstar=%016VR{lstar}\n"
4955 " star=%016VR{star} cstar=%016VR{cstar}\n"
4956 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4957 );
4958
4959 char szInstr[256];
4960 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4961 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4962 szInstr, sizeof(szInstr), NULL);
4963
4964 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4965#else
4966 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4967#endif
4968}
4969
4970/** @} */
4971
4972
4973
4974/** @name Register Access.
4975 * @{
4976 */
4977
4978/**
4979 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4980 *
4981 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4982 * segment limit.
4983 *
4984 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4985 * @param cbInstr Instruction size.
4986 * @param offNextInstr The offset of the next instruction.
4987 * @param enmEffOpSize Effective operand size.
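 *
 * Example (hypothetical values): a 2-byte 'jmp short' with offNextInstr=-4 at
 * ip=0x0100 in 16-bit code yields uNewIp = 0x0100 + 2 + (-4) = 0x00fe, which is
 * then checked against the CS limit before being committed to RIP.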
4988 */
4989VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4990 IEMMODE enmEffOpSize) RT_NOEXCEPT
4991{
4992 switch (enmEffOpSize)
4993 {
4994 case IEMMODE_16BIT:
4995 {
4996 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4997 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4998 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4999 pVCpu->cpum.GstCtx.rip = uNewIp;
5000 else
5001 return iemRaiseGeneralProtectionFault0(pVCpu);
5002 break;
5003 }
5004
5005 case IEMMODE_32BIT:
5006 {
5007 Assert(!IEM_IS_64BIT_CODE(pVCpu));
5008 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
5009
5010 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
5011 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
5012 pVCpu->cpum.GstCtx.rip = uNewEip;
5013 else
5014 return iemRaiseGeneralProtectionFault0(pVCpu);
5015 break;
5016 }
5017
5018 case IEMMODE_64BIT:
5019 {
5020 Assert(IEM_IS_64BIT_CODE(pVCpu));
5021
5022 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
5023 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
5024 pVCpu->cpum.GstCtx.rip = uNewRip;
5025 else
5026 return iemRaiseGeneralProtectionFault0(pVCpu);
5027 break;
5028 }
5029
5030 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5031 }
5032
5033#ifndef IEM_WITH_CODE_TLB
5034 /* Flush the prefetch buffer. */
5035 pVCpu->iem.s.cbOpcode = cbInstr;
5036#endif
5037
5038 /*
5039 * Clear RF and finish the instruction (maybe raise #DB).
5040 */
5041 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5042}
5043
5044
5045/**
5046 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
5047 *
5048 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5049 * segment limit.
5050 *
5051 * @returns Strict VBox status code.
5052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5053 * @param cbInstr Instruction size.
5054 * @param offNextInstr The offset of the next instruction.
5055 */
5056VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
5057{
5058 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
5059
5060 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
5061 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
5062 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
5063 pVCpu->cpum.GstCtx.rip = uNewIp;
5064 else
5065 return iemRaiseGeneralProtectionFault0(pVCpu);
5066
5067#ifndef IEM_WITH_CODE_TLB
5068 /* Flush the prefetch buffer. */
5069 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5070#endif
5071
5072 /*
5073 * Clear RF and finish the instruction (maybe raise #DB).
5074 */
5075 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5076}
5077
5078
5079/**
5080 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
5081 *
5082 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5083 * segment limit.
5084 *
5085 * @returns Strict VBox status code.
5086 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5087 * @param cbInstr Instruction size.
5088 * @param offNextInstr The offset of the next instruction.
5089 * @param enmEffOpSize Effective operand size.
5090 */
5091VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
5092 IEMMODE enmEffOpSize) RT_NOEXCEPT
5093{
5094 if (enmEffOpSize == IEMMODE_32BIT)
5095 {
5096 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
5097
5098 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
5099 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
5100 pVCpu->cpum.GstCtx.rip = uNewEip;
5101 else
5102 return iemRaiseGeneralProtectionFault0(pVCpu);
5103 }
5104 else
5105 {
5106 Assert(enmEffOpSize == IEMMODE_64BIT);
5107
5108 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
5109 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
5110 pVCpu->cpum.GstCtx.rip = uNewRip;
5111 else
5112 return iemRaiseGeneralProtectionFault0(pVCpu);
5113 }
5114
5115#ifndef IEM_WITH_CODE_TLB
5116 /* Flush the prefetch buffer. */
5117 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5118#endif
5119
5120 /*
5121 * Clear RF and finish the instruction (maybe raise #DB).
5122 */
5123 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5124}
5125
5126/** @} */
5127
5128
5129/** @name FPU access and helpers.
5130 *
5131 * @{
5132 */
5133
5134/**
5135 * Updates the x87.DS and FPUDP registers.
5136 *
5137 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5138 * @param pFpuCtx The FPU context.
5139 * @param iEffSeg The effective segment register.
5140 * @param GCPtrEff The effective address relative to @a iEffSeg.
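 *
 * Example (hypothetical values): in real mode with sel=0x1234 and GCPtrEff=0x0010,
 * FPUDP is set to (0x1234 << 4) + 0x0010 = 0x12350 and DS is stored as zero.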
5141 */
5142DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5143{
5144 RTSEL sel;
5145 switch (iEffSeg)
5146 {
5147 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
5148 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
5149 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
5150 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
5151 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
5152 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
5153 default:
5154 AssertMsgFailed(("%d\n", iEffSeg));
5155 sel = pVCpu->cpum.GstCtx.ds.Sel;
5156 }
5157 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5158 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5159 {
5160 pFpuCtx->DS = 0;
5161 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
5162 }
5163 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
5164 {
5165 pFpuCtx->DS = sel;
5166 pFpuCtx->FPUDP = GCPtrEff;
5167 }
5168 else
5169 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
5170}
5171
5172
5173/**
5174 * Rotates the stack registers in the push direction.
5175 *
5176 * @param pFpuCtx The FPU context.
5177 * @remarks This is a complete waste of time, but fxsave stores the registers in
5178 * stack order.
5179 */
5180DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5181{
5182 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5183 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5184 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5185 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5186 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5187 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5188 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5189 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5190 pFpuCtx->aRegs[0].r80 = r80Tmp;
5191}
5192
5193
5194/**
5195 * Rotates the stack registers in the pop direction.
5196 *
5197 * @param pFpuCtx The FPU context.
5198 * @remarks This is a complete waste of time, but fxsave stores the registers in
5199 * stack order.
5200 */
5201DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5202{
5203 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5204 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5205 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5206 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5207 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5208 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5209 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5210 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5211 pFpuCtx->aRegs[7].r80 = r80Tmp;
5212}
5213
5214
5215/**
5216 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5217 * exception prevents it.
5218 *
5219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5220 * @param pResult The FPU operation result to push.
5221 * @param pFpuCtx The FPU context.
5222 */
5223static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5224{
5225 /* Update FSW and bail if there are pending exceptions afterwards. */
5226 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5227 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5228 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5229 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5230 {
5231 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5232 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
5233 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5234 pFpuCtx->FSW = fFsw;
5235 return;
5236 }
5237
5238 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
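 /* Note: adding 7 modulo 8 is TOP - 1, i.e. the register the pushed value will
    land in (e.g. TOP=0 gives iNewTop=7). */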
5239 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5240 {
5241 /* All is fine, push the actual value. */
5242 pFpuCtx->FTW |= RT_BIT(iNewTop);
5243 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5244 }
5245 else if (pFpuCtx->FCW & X86_FCW_IM)
5246 {
5247 /* Masked stack overflow, push QNaN. */
5248 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5249 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5250 }
5251 else
5252 {
5253 /* Raise stack overflow, don't push anything. */
5254 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5255 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5256 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5257 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5258 return;
5259 }
5260
5261 fFsw &= ~X86_FSW_TOP_MASK;
5262 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5263 pFpuCtx->FSW = fFsw;
5264
5265 iemFpuRotateStackPush(pFpuCtx);
5266 RT_NOREF(pVCpu);
5267}
5268
5269
5270/**
5271 * Stores a result in a FPU register and updates the FSW and FTW.
5272 *
5273 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5274 * @param pFpuCtx The FPU context.
5275 * @param pResult The result to store.
5276 * @param iStReg Which FPU register to store it in.
5277 */
5278static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
5279{
5280 Assert(iStReg < 8);
5281 uint16_t fNewFsw = pFpuCtx->FSW;
5282 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
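 /* Note: ST(iStReg) lives in physical register (TOP + iStReg) mod 8; FTW is indexed
    by physical register, while aRegs[] is kept in stack order. */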
5283 fNewFsw &= ~X86_FSW_C_MASK;
5284 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5285 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5286 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
5287 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
5288 pFpuCtx->FSW = fNewFsw;
5289 pFpuCtx->FTW |= RT_BIT(iReg);
5290 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5291 RT_NOREF(pVCpu);
5292}
5293
5294
5295/**
5296 * Only updates the FPU status word (FSW) with the result of the current
5297 * instruction.
5298 *
5299 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5300 * @param pFpuCtx The FPU context.
5301 * @param u16FSW The FSW output of the current instruction.
5302 */
5303static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
5304{
5305 uint16_t fNewFsw = pFpuCtx->FSW;
5306 fNewFsw &= ~X86_FSW_C_MASK;
5307 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
5308 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5309 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
5310 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
5311 pFpuCtx->FSW = fNewFsw;
5312 RT_NOREF(pVCpu);
5313}
5314
5315
5316/**
5317 * Pops one item off the FPU stack if no pending exception prevents it.
5318 *
5319 * @param pFpuCtx The FPU context.
5320 */
5321static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5322{
5323 /* Check pending exceptions. */
5324 uint16_t uFSW = pFpuCtx->FSW;
5325 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5326 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5327 return;
5328
5329 /* Pop: TOP is incremented (adding 9 modulo 8 in the TOP field is +1). */
5330 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5331 uFSW &= ~X86_FSW_TOP_MASK;
5332 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5333 pFpuCtx->FSW = uFSW;
5334
5335 /* Mark the previous ST0 as empty. */
5336 iOldTop >>= X86_FSW_TOP_SHIFT;
5337 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5338
5339 /* Rotate the registers. */
5340 iemFpuRotateStackPop(pFpuCtx);
5341}
5342
5343
5344/**
5345 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5346 *
5347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5348 * @param pResult The FPU operation result to push.
5349 * @param uFpuOpcode The FPU opcode value.
5350 */
5351void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5352{
5353 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5354 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5355 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5356}
5357
5358
5359/**
5360 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5361 * and sets FPUDP and FPUDS.
5362 *
5363 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5364 * @param pResult The FPU operation result to push.
5365 * @param iEffSeg The effective segment register.
5366 * @param GCPtrEff The effective address relative to @a iEffSeg.
5367 * @param uFpuOpcode The FPU opcode value.
5368 */
5369void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5370 uint16_t uFpuOpcode) RT_NOEXCEPT
5371{
5372 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5373 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5374 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5375 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5376}
5377
5378
5379/**
5380 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5381 * unless a pending exception prevents it.
5382 *
5383 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5384 * @param pResult The FPU operation result to store and push.
5385 * @param uFpuOpcode The FPU opcode value.
5386 */
5387void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5388{
5389 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5390 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5391
5392 /* Update FSW and bail if there are pending exceptions afterwards. */
5393 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5394 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5395 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5396 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5397 {
5398 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5399 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5400 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5401 pFpuCtx->FSW = fFsw;
5402 return;
5403 }
5404
5405 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5406 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5407 {
5408 /* All is fine, push the actual value. */
5409 pFpuCtx->FTW |= RT_BIT(iNewTop);
5410 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5411 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5412 }
5413 else if (pFpuCtx->FCW & X86_FCW_IM)
5414 {
5415 /* Masked stack overflow, push QNaN. */
5416 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5417 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5418 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5419 }
5420 else
5421 {
5422 /* Raise stack overflow, don't push anything. */
5423 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5424 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5425 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5426 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5427 return;
5428 }
5429
5430 fFsw &= ~X86_FSW_TOP_MASK;
5431 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5432 pFpuCtx->FSW = fFsw;
5433
5434 iemFpuRotateStackPush(pFpuCtx);
5435}
5436
5437
5438/**
5439 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5440 * FOP.
5441 *
5442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5443 * @param pResult The result to store.
5444 * @param iStReg Which FPU register to store it in.
5445 * @param uFpuOpcode The FPU opcode value.
5446 */
5447void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5448{
5449 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5450 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5451 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5452}
5453
5454
5455/**
5456 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5457 * FOP, and then pops the stack.
5458 *
5459 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5460 * @param pResult The result to store.
5461 * @param iStReg Which FPU register to store it in.
5462 * @param uFpuOpcode The FPU opcode value.
5463 */
5464void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5465{
5466 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5467 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5468 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5469 iemFpuMaybePopOne(pFpuCtx);
5470}
5471
5472
5473/**
5474 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5475 * FPUDP, and FPUDS.
5476 *
5477 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5478 * @param pResult The result to store.
5479 * @param iStReg Which FPU register to store it in.
5480 * @param iEffSeg The effective memory operand selector register.
5481 * @param GCPtrEff The effective memory operand offset.
5482 * @param uFpuOpcode The FPU opcode value.
5483 */
5484void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5485 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5486{
5487 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5488 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5489 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5490 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5491}
5492
5493
5494/**
5495 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5496 * FPUDP, and FPUDS, and then pops the stack.
5497 *
5498 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5499 * @param pResult The result to store.
5500 * @param iStReg Which FPU register to store it in.
5501 * @param iEffSeg The effective memory operand selector register.
5502 * @param GCPtrEff The effective memory operand offset.
5503 * @param uFpuOpcode The FPU opcode value.
5504 */
5505void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5506 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5507{
5508 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5509 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5510 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5511 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5512 iemFpuMaybePopOne(pFpuCtx);
5513}
5514
5515
5516/**
5517 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5518 *
5519 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5520 * @param uFpuOpcode The FPU opcode value.
5521 */
5522void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5523{
5524 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5525 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5526}
5527
5528
5529/**
5530 * Updates the FSW, FOP, FPUIP, and FPUCS.
5531 *
5532 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5533 * @param u16FSW The FSW from the current instruction.
5534 * @param uFpuOpcode The FPU opcode value.
5535 */
5536void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5537{
5538 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5539 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5540 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5541}
5542
5543
5544/**
5545 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5546 *
5547 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5548 * @param u16FSW The FSW from the current instruction.
5549 * @param uFpuOpcode The FPU opcode value.
5550 */
5551void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5552{
5553 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5554 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5555 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5556 iemFpuMaybePopOne(pFpuCtx);
5557}
5558
5559
5560/**
5561 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5562 *
5563 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5564 * @param u16FSW The FSW from the current instruction.
5565 * @param iEffSeg The effective memory operand selector register.
5566 * @param GCPtrEff The effective memory operand offset.
5567 * @param uFpuOpcode The FPU opcode value.
5568 */
5569void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5570{
5571 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5572 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5573 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5574 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5575}
5576
5577
5578/**
5579 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5580 *
5581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5582 * @param u16FSW The FSW from the current instruction.
5583 * @param uFpuOpcode The FPU opcode value.
5584 */
5585void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5586{
5587 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5588 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5589 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5590 iemFpuMaybePopOne(pFpuCtx);
5591 iemFpuMaybePopOne(pFpuCtx);
5592}
5593
5594
5595/**
5596 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5597 *
5598 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5599 * @param u16FSW The FSW from the current instruction.
5600 * @param iEffSeg The effective memory operand selector register.
5601 * @param GCPtrEff The effective memory operand offset.
5602 * @param uFpuOpcode The FPU opcode value.
5603 */
5604void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5605 uint16_t uFpuOpcode) RT_NOEXCEPT
5606{
5607 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5608 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5609 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5610 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5611 iemFpuMaybePopOne(pFpuCtx);
5612}
5613
5614
5615/**
5616 * Worker routine for raising an FPU stack underflow exception.
5617 *
5618 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5619 * @param pFpuCtx The FPU context.
5620 * @param iStReg The stack register being accessed.
5621 */
5622static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5623{
5624 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5625 if (pFpuCtx->FCW & X86_FCW_IM)
5626 {
5627 /* Masked underflow. */
5628 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5629 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5630 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5631 if (iStReg != UINT8_MAX)
5632 {
5633 pFpuCtx->FTW |= RT_BIT(iReg);
5634 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5635 }
5636 }
5637 else
5638 {
5639 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5640 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5641 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5642 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5643 }
5644 RT_NOREF(pVCpu);
5645}
5646
5647
5648/**
5649 * Raises a FPU stack underflow exception.
5650 *
5651 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5652 * @param iStReg The destination register that should be loaded
5653 * with QNaN if \#IS is not masked. Specify
5654 * UINT8_MAX if none (like for fcom).
5655 * @param uFpuOpcode The FPU opcode value.
5656 */
5657void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5658{
5659 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5660 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5661 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5662}
5663
5664
5665void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5666{
5667 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5668 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5669 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5670 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5671}
5672
5673
5674void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5675{
5676 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5677 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5678 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5679 iemFpuMaybePopOne(pFpuCtx);
5680}
5681
5682
5683void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5684 uint16_t uFpuOpcode) RT_NOEXCEPT
5685{
5686 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5687 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5688 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5689 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5690 iemFpuMaybePopOne(pFpuCtx);
5691}
5692
5693
5694void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5695{
5696 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5697 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5698 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5699 iemFpuMaybePopOne(pFpuCtx);
5700 iemFpuMaybePopOne(pFpuCtx);
5701}
5702
5703
5704void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5705{
5706 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5707 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5708
5709 if (pFpuCtx->FCW & X86_FCW_IM)
5710 {
5711 /* Masked underflow - Push QNaN. */
5712 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5713 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5714 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5715 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5716 pFpuCtx->FTW |= RT_BIT(iNewTop);
5717 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5718 iemFpuRotateStackPush(pFpuCtx);
5719 }
5720 else
5721 {
5722 /* Exception pending - don't change TOP or the register stack. */
5723 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5724 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5725 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5726 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5727 }
5728}
5729
5730
5731void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5732{
5733 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5734 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5735
5736 if (pFpuCtx->FCW & X86_FCW_IM)
5737 {
5738 /* Masked underflow - Push QNaN. */
5739 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5740 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5741 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5742 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5743 pFpuCtx->FTW |= RT_BIT(iNewTop);
5744 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5745 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5746 iemFpuRotateStackPush(pFpuCtx);
5747 }
5748 else
5749 {
5750 /* Exception pending - don't change TOP or the register stack. */
5751 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5752 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5753 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5754 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5755 }
5756}
5757
5758
5759/**
5760 * Worker routine for raising an FPU stack overflow exception on a push.
5761 *
5762 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5763 * @param pFpuCtx The FPU context.
5764 */
5765static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5766{
5767 if (pFpuCtx->FCW & X86_FCW_IM)
5768 {
5769 /* Masked overflow. */
5770 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5771 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5772 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5773 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5774 pFpuCtx->FTW |= RT_BIT(iNewTop);
5775 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5776 iemFpuRotateStackPush(pFpuCtx);
5777 }
5778 else
5779 {
5780 /* Exception pending - don't change TOP or the register stack. */
5781 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5782 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5783 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5784 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5785 }
5786 RT_NOREF(pVCpu);
5787}
5788
5789
5790/**
5791 * Raises a FPU stack overflow exception on a push.
5792 *
5793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5794 * @param uFpuOpcode The FPU opcode value.
5795 */
5796void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5797{
5798 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5799 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5800 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5801}
5802
5803
5804/**
5805 * Raises a FPU stack overflow exception on a push with a memory operand.
5806 *
5807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5808 * @param iEffSeg The effective memory operand selector register.
5809 * @param GCPtrEff The effective memory operand offset.
5810 * @param uFpuOpcode The FPU opcode value.
5811 */
5812void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5813{
5814 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5815 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5816 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5817 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5818}
5819
5820/** @} */
5821
5822
5823/** @name Memory access.
5824 *
5825 * @{
5826 */
5827
5828#undef LOG_GROUP
5829#define LOG_GROUP LOG_GROUP_IEM_MEM
5830
5831/**
5832 * Updates the IEMCPU::cbWritten counter if applicable.
5833 *
5834 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5835 * @param fAccess The access being accounted for.
5836 * @param cbMem The access size.
5837 */
5838DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5839{
5840 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5841 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5842 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5843}
5844
5845
5846/**
5847 * Applies the segment limit, base and attributes.
5848 *
5849 * This may raise a \#GP or \#SS.
5850 *
5851 * @returns VBox strict status code.
5852 *
5853 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5854 * @param fAccess The kind of access which is being performed.
5855 * @param iSegReg The index of the segment register to apply.
5856 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5857 * TSS, ++).
5858 * @param cbMem The access size.
5859 * @param pGCPtrMem Pointer to the guest memory address to apply
5860 * segmentation to. Input and output parameter.
5861 */
5862VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5863{
5864 if (iSegReg == UINT8_MAX)
5865 return VINF_SUCCESS;
5866
5867 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5868 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5869 switch (IEM_GET_CPU_MODE(pVCpu))
5870 {
5871 case IEMMODE_16BIT:
5872 case IEMMODE_32BIT:
5873 {
5874 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5875 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5876
5877 if ( pSel->Attr.n.u1Present
5878 && !pSel->Attr.n.u1Unusable)
5879 {
5880 Assert(pSel->Attr.n.u1DescType);
5881 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5882 {
5883 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5884 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5885 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5886
5887 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5888 {
5889 /** @todo CPL check. */
5890 }
5891
5892 /*
5893 * There are two kinds of data selectors, normal and expand down.
5894 */
5895 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5896 {
5897 if ( GCPtrFirst32 > pSel->u32Limit
5898 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5899 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5900 }
5901 else
5902 {
5903 /*
5904 * The upper boundary is defined by the B bit, not the G bit!
5905 */
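 /* E.g. (hypothetical values): an expand-down data segment with limit=0x0fff and
    B=1 makes offsets 0x1000..0xffffffff valid, while 0x0000..0x0fff fault. */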
5906 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5907 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5908 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5909 }
5910 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5911 }
5912 else
5913 {
5914 /*
5915 * A code selector can usually be used to read thru; writing is
5916 * only permitted in real and V8086 mode.
5917 */
5918 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5919 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5920 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5921 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5922 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5923
5924 if ( GCPtrFirst32 > pSel->u32Limit
5925 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5926 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5927
5928 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5929 {
5930 /** @todo CPL check. */
5931 }
5932
5933 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5934 }
5935 }
5936 else
5937 return iemRaiseGeneralProtectionFault0(pVCpu);
5938 return VINF_SUCCESS;
5939 }
5940
5941 case IEMMODE_64BIT:
5942 {
5943 RTGCPTR GCPtrMem = *pGCPtrMem;
5944 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5945 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5946
5947 Assert(cbMem >= 1);
5948 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5949 return VINF_SUCCESS;
5950 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5951 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5952 return iemRaiseGeneralProtectionFault0(pVCpu);
5953 }
5954
5955 default:
5956 AssertFailedReturn(VERR_IEM_IPE_7);
5957 }
5958}
5959
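/*
 * Illustration (hypothetical values, following the expand-down check above):
 * for an expand-down data segment with u32Limit=0x00000fff and the B bit set
 * (u1DefBig=1), valid offsets are 0x00001000..0xffffffff.  A 4 byte access at
 * GCPtrFirst32=0x00000ffe thus takes the iemRaiseSelectorBounds path, while
 * the same access at 0x00001000 passes and gets u64Base added.
 */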
5960
5961/**
5962 * Translates a virtual address to a physical address and checks if we
5963 * can access the page as specified.
5964 *
5965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5966 * @param GCPtrMem The virtual address.
5967 * @param cbAccess The access size, for raising \#PF correctly for
5968 * FXSAVE and such.
5969 * @param fAccess The intended access.
5970 * @param pGCPhysMem Where to return the physical address.
5971 */
5972VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5973 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5974{
5975 /** @todo Need a different PGM interface here. We're currently using
5976 * generic / REM interfaces. this won't cut it for R0. */
5977 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5978 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5979 * here. */
5980 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
5981 PGMPTWALKFAST WalkFast;
5982 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
5983 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
5984 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
5985 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
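 /* Note: the XOR below sets PGMQPAGE_F_CR0_WP0 exactly when CR0.WP is clear;
 the AssertCompile above guarantees the two bit values line up. */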
5986 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
5987 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
5988 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5989 fQPage |= PGMQPAGE_F_USER_MODE;
5990 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
5991 if (RT_SUCCESS(rc))
5992 {
5993 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
5994
5995 /* If the page is writable and does not have the no-exec bit set, all
5996 access is allowed. Otherwise we'll have to check more carefully... */
5997 Assert( (WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) == (X86_PTE_RW | X86_PTE_US)
5998 || ( ( !(fAccess & IEM_ACCESS_TYPE_WRITE)
5999 || (WalkFast.fEffective & X86_PTE_RW)
6000 || ( ( IEM_GET_CPL(pVCpu) != 3
6001 || (fAccess & IEM_ACCESS_WHAT_SYS))
6002 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)) )
6003 && ( (WalkFast.fEffective & X86_PTE_US)
6004 || IEM_GET_CPL(pVCpu) != 3
6005 || (fAccess & IEM_ACCESS_WHAT_SYS) )
6006 && ( !(fAccess & IEM_ACCESS_TYPE_EXEC)
6007 || !(WalkFast.fEffective & X86_PTE_PAE_NX)
6008 || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
6009 )
6010 );
6011
6012 /* PGMGstQueryPageFast sets the A & D bits. */
6013 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6014 Assert(!(~WalkFast.fEffective & (fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A)));
6015
6016 *pGCPhysMem = WalkFast.GCPhys;
6017 return VINF_SUCCESS;
6018 }
6019
6020 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6021 /** @todo Check unassigned memory in unpaged mode. */
6022#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6023 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
6024 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6025#endif
6026 *pGCPhysMem = NIL_RTGCPHYS;
6027 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
6028}
6029
6030#if 0 /*unused*/
6031/**
6032 * Looks up a memory mapping entry.
6033 *
6034 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6035 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6036 * @param pvMem The memory address.
6037 * @param fAccess The access to look for (IEM_ACCESS_WHAT_XXX + IEM_ACCESS_TYPE_XXX).
6038 */
6039DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
6040{
6041 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6042 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6043 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
6044 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6045 return 0;
6046 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
6047 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6048 return 1;
6049 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
6050 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6051 return 2;
6052 return VERR_NOT_FOUND;
6053}
6054#endif
6055
6056/**
6057 * Finds a free memmap entry when using iNextMapping doesn't work.
6058 *
6059 * @returns Memory mapping index, 1024 on failure.
6060 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6061 */
6062static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
6063{
6064 /*
6065 * The easy case.
6066 */
6067 if (pVCpu->iem.s.cActiveMappings == 0)
6068 {
6069 pVCpu->iem.s.iNextMapping = 1;
6070 return 0;
6071 }
6072
6073 /* There should be enough mappings for all instructions. */
6074 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
6075
6076 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
6077 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6078 return i;
6079
6080 AssertFailedReturn(1024);
6081}
6082
6083
6084/**
6085 * Commits a bounce buffer that needs writing back and unmaps it.
6086 *
6087 * @returns Strict VBox status code.
6088 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6089 * @param iMemMap The index of the buffer to commit.
6090 * @param fPostponeFail Whether we can postpone write failures to ring-3.
6091 * Always false in ring-3, obviously.
6092 */
6093static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
6094{
6095 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6096 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6097#ifdef IN_RING3
6098 Assert(!fPostponeFail);
6099 RT_NOREF_PV(fPostponeFail);
6100#endif
6101
6102 /*
6103 * Do the writing.
6104 */
6105 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6106 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
6107 {
6108 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
6109 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6110 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6111 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6112 {
6113 /*
6114 * Carefully and efficiently dealing with access handler return
6115 * codes makes this a little bloated.
6116 */
6117 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6118 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6119 pbBuf,
6120 cbFirst,
6121 PGMACCESSORIGIN_IEM);
6122 if (rcStrict == VINF_SUCCESS)
6123 {
6124 if (cbSecond)
6125 {
6126 rcStrict = PGMPhysWrite(pVM,
6127 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6128 pbBuf + cbFirst,
6129 cbSecond,
6130 PGMACCESSORIGIN_IEM);
6131 if (rcStrict == VINF_SUCCESS)
6132 { /* nothing */ }
6133 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6134 {
6135 LogEx(LOG_GROUP_IEM,
6136 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6137 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6138 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6139 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6140 }
6141#ifndef IN_RING3
6142 else if (fPostponeFail)
6143 {
6144 LogEx(LOG_GROUP_IEM,
6145 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6146 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6147 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6148 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6149 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6150 return iemSetPassUpStatus(pVCpu, rcStrict);
6151 }
6152#endif
6153 else
6154 {
6155 LogEx(LOG_GROUP_IEM,
6156 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6157 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6158 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6159 return rcStrict;
6160 }
6161 }
6162 }
6163 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6164 {
6165 if (!cbSecond)
6166 {
6167 LogEx(LOG_GROUP_IEM,
6168 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6169 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6170 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6171 }
6172 else
6173 {
6174 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6175 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6176 pbBuf + cbFirst,
6177 cbSecond,
6178 PGMACCESSORIGIN_IEM);
6179 if (rcStrict2 == VINF_SUCCESS)
6180 {
6181 LogEx(LOG_GROUP_IEM,
6182 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6183 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6184 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6185 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6186 }
6187 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6188 {
6189 LogEx(LOG_GROUP_IEM,
6190 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6191 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6192 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6193 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6194 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6195 }
6196#ifndef IN_RING3
6197 else if (fPostponeFail)
6198 {
6199 LogEx(LOG_GROUP_IEM,
6200 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6201 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6202 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6203 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6204 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6205 return iemSetPassUpStatus(pVCpu, rcStrict);
6206 }
6207#endif
6208 else
6209 {
6210 LogEx(LOG_GROUP_IEM,
6211 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6212 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6213 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6214 return rcStrict2;
6215 }
6216 }
6217 }
6218#ifndef IN_RING3
6219 else if (fPostponeFail)
6220 {
6221 LogEx(LOG_GROUP_IEM,
6222 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6223 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6224 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6225 if (!cbSecond)
6226 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
6227 else
6228 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
6229 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6230 return iemSetPassUpStatus(pVCpu, rcStrict);
6231 }
6232#endif
6233 else
6234 {
6235 LogEx(LOG_GROUP_IEM,
6236 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6237 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6238 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6239 return rcStrict;
6240 }
6241 }
6242 else
6243 {
6244 /*
6245 * No access handlers, much simpler.
6246 */
6247 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6248 if (RT_SUCCESS(rc))
6249 {
6250 if (cbSecond)
6251 {
6252 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6253 if (RT_SUCCESS(rc))
6254 { /* likely */ }
6255 else
6256 {
6257 LogEx(LOG_GROUP_IEM,
6258 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6259 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6260 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6261 return rc;
6262 }
6263 }
6264 }
6265 else
6266 {
6267 LogEx(LOG_GROUP_IEM,
6268 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6269 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6270 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6271 return rc;
6272 }
6273 }
6274 }
6275
6276#if defined(IEM_LOG_MEMORY_WRITES)
6277 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6278 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
6279 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
6280 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6281 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
6282 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
6283
6284 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6285 g_cbIemWrote = cbWrote;
6286 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6287#endif
6288
6289 /*
6290 * Free the mapping entry.
6291 */
6292 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6293 Assert(pVCpu->iem.s.cActiveMappings != 0);
6294 pVCpu->iem.s.cActiveMappings--;
6295 return VINF_SUCCESS;
6296}
6297
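/*
 * Note on fPostponeFail (summary of the paths above): outside ring-3 a failing
 * PGMPhysWrite can be postponed by marking the mapping with
 * IEM_ACCESS_PENDING_R3_WRITE_1ST / _2ND and setting VMCPU_FF_IEM, so that the
 * write can be completed from ring-3 later.
 */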
6298
6299/**
6300 * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
6301 */
6302DECL_FORCE_INLINE(uint32_t)
6303iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
6304{
6305 bool const fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
6306 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6307 return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
6308 return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
6309}
6310
6311
6312/**
6313 * iemMemMap worker that deals with a request crossing pages.
6314 */
6315static VBOXSTRICTRC
6316iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
6317 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6318{
6319 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);
6320 Assert(cbMem <= GUEST_PAGE_SIZE);
6321
6322 /*
6323 * Do the address translations.
6324 */
6325 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
6326 RTGCPHYS GCPhysFirst;
6327 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
6328 if (rcStrict != VINF_SUCCESS)
6329 return rcStrict;
6330 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
6331
6332 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
6333 RTGCPHYS GCPhysSecond;
6334 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6335 cbSecondPage, fAccess, &GCPhysSecond);
6336 if (rcStrict != VINF_SUCCESS)
6337 return rcStrict;
6338 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
6339 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
6340
6341 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6342
6343 /*
6344 * Check for data breakpoints.
6345 */
6346 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))
6347 { /* likely */ }
6348 else
6349 {
6350 uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);
6351 fDataBps |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6352 cbSecondPage, fAccess);
6353 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
6354 if (fDataBps > 1)
6355 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
6356 fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6357 }
6358
6359 /*
6360 * Read in the current memory content if it's a read, execute or partial
6361 * write access.
6362 */
6363 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6364
6365 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6366 {
6367 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6368 {
6369 /*
6370 * Must carefully deal with access handler status codes here,
6371 * makes the code a bit bloated.
6372 */
6373 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6374 if (rcStrict == VINF_SUCCESS)
6375 {
6376 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6377 if (rcStrict == VINF_SUCCESS)
6378 { /*likely */ }
6379 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6380 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6381 else
6382 {
6383 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6384 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6385 return rcStrict;
6386 }
6387 }
6388 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6389 {
6390 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6391 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6392 {
6393 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6394 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6395 }
6396 else
6397 {
6398 LogEx(LOG_GROUP_IEM,
6399 ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6400 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6401 return rcStrict2;
6402 }
6403 }
6404 else
6405 {
6406 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6407 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6408 return rcStrict;
6409 }
6410 }
6411 else
6412 {
6413 /*
6414 * No informational status codes here, much more straightforward.
6415 */
6416 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6417 if (RT_SUCCESS(rc))
6418 {
6419 Assert(rc == VINF_SUCCESS);
6420 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6421 if (RT_SUCCESS(rc))
6422 Assert(rc == VINF_SUCCESS);
6423 else
6424 {
6425 LogEx(LOG_GROUP_IEM,
6426 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6427 return rc;
6428 }
6429 }
6430 else
6431 {
6432 LogEx(LOG_GROUP_IEM,
6433 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6434 return rc;
6435 }
6436 }
6437 }
6438#ifdef VBOX_STRICT
6439 else
6440 memset(pbBuf, 0xcc, cbMem);
6441 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6442 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6443#endif
6444 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6445
6446 /*
6447 * Commit the bounce buffer entry.
6448 */
6449 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6450 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6451 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6452 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6453 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6454 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6455 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6456 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6457 pVCpu->iem.s.cActiveMappings++;
6458
6459 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6460 *ppvMem = pbBuf;
6461 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6462 return VINF_SUCCESS;
6463}
6464
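/*
 * Illustration (hypothetical numbers, assuming 4 KiB guest pages): an 8 byte
 * access at GCPtrFirst=0x7ffe gives cbFirstPage = 0x1000 - 0xffe = 2 and
 * cbSecondPage = 8 - 2 = 6; the second translation above is then done for
 * (0x7ffe + 7) & ~0xfff = 0x8000, i.e. the start of the following page.
 */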
6465
6466/**
6467 * iemMemMap worker that deals with iemMemPageMap failures.
6468 */
6469static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6470 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6471{
6472 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);
6473
6474 /*
6475 * Filter out the conditions we cannot handle here, asserting on the ones which shouldn't happen at all.
6476 */
6477 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6478 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6479 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6480 {
6481 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6482 return rcMap;
6483 }
6484 pVCpu->iem.s.cPotentialExits++;
6485
6486 /*
6487 * Read in the current memory content if it's a read, execute or partial
6488 * write access.
6489 */
6490 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6491 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6492 {
6493 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6494 memset(pbBuf, 0xff, cbMem);
6495 else
6496 {
6497 int rc;
6498 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6499 {
6500 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6501 if (rcStrict == VINF_SUCCESS)
6502 { /* nothing */ }
6503 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6504 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6505 else
6506 {
6507 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6508 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6509 return rcStrict;
6510 }
6511 }
6512 else
6513 {
6514 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6515 if (RT_SUCCESS(rc))
6516 { /* likely */ }
6517 else
6518 {
6519 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6520 GCPhysFirst, rc));
6521 return rc;
6522 }
6523 }
6524 }
6525 }
6526#ifdef VBOX_STRICT
6527 else
6528 memset(pbBuf, 0xcc, cbMem);
6531 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6532 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6533#endif
6534
6535 /*
6536 * Commit the bounce buffer entry.
6537 */
6538 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6539 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6540 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6541 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6542 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6543 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6544 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6545 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6546 pVCpu->iem.s.cActiveMappings++;
6547
6548 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6549 *ppvMem = pbBuf;
6550 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6551 return VINF_SUCCESS;
6552}
6553
6554
6555
6556/**
6557 * Maps the specified guest memory for the given kind of access.
6558 *
6559 * This may be using bounce buffering of the memory if it's crossing a page
6560 * boundary or if there is an access handler installed for any of it. Because
6561 * of lock prefix guarantees, we're in for some extra clutter when this
6562 * happens.
6563 *
6564 * This may raise a \#GP, \#SS, \#PF or \#AC.
6565 *
6566 * @returns VBox strict status code.
6567 *
6568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6569 * @param ppvMem Where to return the pointer to the mapped memory.
6570 * @param pbUnmapInfo Where to return unmap info to be passed to
6571 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6572 * done.
6573 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6574 * 8, 12, 16, 32 or 512. When used by string operations
6575 * it can be up to a page.
6576 * @param iSegReg The index of the segment register to use for this
6577 * access. The base and limits are checked. Use UINT8_MAX
6578 * to indicate that no segmentation is required (for IDT,
6579 * GDT and LDT accesses).
6580 * @param GCPtrMem The address of the guest memory.
6581 * @param fAccess How the memory is being accessed. The
6582 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6583 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6584 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6585 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6586 * set.
6587 * @param uAlignCtl Alignment control:
6588 * - Bits 15:0 is the alignment mask.
6589 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6590 * IEM_MEMMAP_F_ALIGN_SSE, and
6591 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6592 * Pass zero to skip alignment.
6593 */
6594VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6595 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6596{
6597 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapNoJmp);
6598
6599 /*
6600 * Check the input and figure out which mapping entry to use.
6601 */
6602 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6603 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6604 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6605 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6606 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6607
6608 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6609 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6610 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6611 {
6612 iMemMap = iemMemMapFindFree(pVCpu);
6613 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6614 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6615 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6616 pVCpu->iem.s.aMemMappings[2].fAccess),
6617 VERR_IEM_IPE_9);
6618 }
6619
6620 /*
6621 * Map the memory, checking that we can actually access it. If something
6622 * slightly complicated happens, fall back on bounce buffering.
6623 */
6624 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6625 if (rcStrict == VINF_SUCCESS)
6626 { /* likely */ }
6627 else
6628 return rcStrict;
6629
6630 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6631 { /* likely */ }
6632 else
6633 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6634
6635 /*
6636 * Alignment check.
6637 */
6638 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6639 { /* likelyish */ }
6640 else
6641 {
6642 /* Misaligned access. */
6643 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6644 {
6645 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6646 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6647 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6648 {
6649 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6650
6651 if (!iemMemAreAlignmentChecksEnabled(pVCpu))
6652 { /* likely */ }
6653 else
6654 return iemRaiseAlignmentCheckException(pVCpu);
6655 }
6656 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6657 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6658 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6659 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6660 * that's what FXSAVE does on a 10980xe. */
6661 && iemMemAreAlignmentChecksEnabled(pVCpu))
6662 return iemRaiseAlignmentCheckException(pVCpu);
6663 else
6664 return iemRaiseGeneralProtectionFault0(pVCpu);
6665 }
6666
6667#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6668 /* If the access is atomic there are host platform alignment restrictions
6669 we need to conform with. */
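 /* Illustration (hypothetical address): on AMD64 an 8 byte atomic access at an
 address with (GCPtrMem & 63) == 60 leaves only 4 bytes in the assumed 64 byte
 cache line, so the condition below is false and we return
 VINF_EM_EMULATE_SPLIT_LOCK. */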
6670 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6671# if defined(RT_ARCH_AMD64)
6672 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6673# elif defined(RT_ARCH_ARM64)
6674 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6675# else
6676# error port me
6677# endif
6678 )
6679 { /* okay */ }
6680 else
6681 {
6682 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6683 pVCpu->iem.s.cMisalignedAtomics += 1;
6684 return VINF_EM_EMULATE_SPLIT_LOCK;
6685 }
6686#endif
6687 }
6688
6689#ifdef IEM_WITH_DATA_TLB
6690 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6691
6692 /*
6693 * Get the TLB entry for this page and check PT flags.
6694 *
6695 * We reload the TLB entry if we need to set the dirty bit (accessed
6696 * should in theory always be set).
6697 */
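 /* Each tag has an even/odd TLB entry pair: the even entry is tagged with the
 non-global revision and the odd entry (+1) with the global revision, which
 is what the two-way check below relies on. */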
6698 uint8_t *pbMem = NULL;
6699 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
6700 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
6701 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0);
6702 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
6703 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
6704 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
6705 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
6706 {
6707# ifdef IEM_WITH_TLB_STATISTICS
6708 pVCpu->iem.s.DataTlb.cTlbCoreHits++;
6709# endif
6710
6711 /* If the page is either supervisor only or non-writable, we need to do
6712 more careful access checks. */
6713 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6714 {
6715 /* Write to read only memory? */
6716 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6717 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6718 && ( ( IEM_GET_CPL(pVCpu) == 3
6719 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6720 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6721 {
6722 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6723 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6724 }
6725
6726 /* Kernel memory accessed by userland? */
6727 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6728 && IEM_GET_CPL(pVCpu) == 3
6729 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6730 {
6731 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6732 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6733 }
6734 }
6735
6736 /* Look up the physical page info if necessary. */
6737 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6738# ifdef IN_RING3
6739 pbMem = pTlbe->pbMappingR3;
6740# else
6741 pbMem = NULL;
6742# endif
6743 else
6744 {
6745 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6746 { /* likely */ }
6747 else
6748 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6749 pTlbe->pbMappingR3 = NULL;
6750 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
6751 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6752 &pbMem, &pTlbe->fFlagsAndPhysRev);
6753 AssertRCReturn(rc, rc);
6754# ifdef IN_RING3
6755 pTlbe->pbMappingR3 = pbMem;
6756# endif
6757 }
6758 }
6759 else
6760 {
6761 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
6762
6763 /* This page table walking will set A and D bits as required by the access while performing the walk.
6764 ASSUMES these are set when the address is translated rather than on commit... */
6765 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6766 PGMPTWALKFAST WalkFast;
6767 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
6768 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
6769 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
6770 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
6771 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
6772 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
6773 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6774 fQPage |= PGMQPAGE_F_USER_MODE;
6775 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
6776 if (RT_SUCCESS(rc))
6777 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
6778 else
6779 {
6780 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6781# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6782 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
6783 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6784# endif
6785 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6786 }
6787
6788 uint32_t fDataBps;
6789 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
6790 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
6791 {
6792 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
6793 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
6794 {
6795 pTlbe--;
6796 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
6797 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
6798 iemTlbLoadedLargePage<false>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
6799 }
6800 else
6801 {
6802 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
6803 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
6804 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
6805 iemTlbLoadedLargePage<true>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
6806 }
6807 }
6808 else
6809 {
6810 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
6811 to the page with the data access breakpoint armed on it to pass thru here. */
6812 if (fDataBps > 1)
6813 LogEx(LOG_GROUP_IEM, ("iemMemMap: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
6814 fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6815 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
6816 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
6817 pTlbe->uTag = uTagNoRev;
6818 }
6819 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
6820 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
6821 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
6822 pTlbe->GCPhys = GCPhysPg;
6823 pTlbe->pbMappingR3 = NULL;
6824 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
6825 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
6826 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
6827 Assert( !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6828 || IEM_GET_CPL(pVCpu) != 3
6829 || (fAccess & IEM_ACCESS_WHAT_SYS));
6830
6831 /* Resolve the physical address. */
6832 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
6833 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6834 &pbMem, &pTlbe->fFlagsAndPhysRev);
6835 AssertRCReturn(rc, rc);
6836# ifdef IN_RING3
6837 pTlbe->pbMappingR3 = pbMem;
6838# endif
6839 }
6840
6841 /*
6842 * Check the physical page level access and mapping.
6843 */
6844 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6845 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6846 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6847 { /* probably likely */ }
6848 else
6849 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
6850 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6851 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6852 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6853 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6854 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6855
6856 if (pbMem)
6857 {
6858 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6859 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6860 fAccess |= IEM_ACCESS_NOT_LOCKED;
6861 }
6862 else
6863 {
6864 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6865 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6866 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6867 if (rcStrict != VINF_SUCCESS)
6868 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6869 }
6870
6871 void * const pvMem = pbMem;
6872
6873 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6874 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6875 if (fAccess & IEM_ACCESS_TYPE_READ)
6876 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6877
6878#else /* !IEM_WITH_DATA_TLB */
6879
6880 RTGCPHYS GCPhysFirst;
6881 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6882 if (rcStrict != VINF_SUCCESS)
6883 return rcStrict;
6884
6885 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6886 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6887 if (fAccess & IEM_ACCESS_TYPE_READ)
6888 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6889
6890 void *pvMem;
6891 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6892 if (rcStrict != VINF_SUCCESS)
6893 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6894
6895#endif /* !IEM_WITH_DATA_TLB */
6896
6897 /*
6898 * Fill in the mapping table entry.
6899 */
6900 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6901 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6902 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6903 pVCpu->iem.s.cActiveMappings += 1;
6904
6905 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6906 *ppvMem = pvMem;
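 /* bUnmapInfo layout (see the encoding below): bits 2:0 = mapping index,
 bit 3 = valid marker (0x08), bits 7:4 = IEM_ACCESS_TYPE_XXX bits of fAccess. */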
6907 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6908 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
6909 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
6910
6911 return VINF_SUCCESS;
6912}
6913
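/*
 * Usage sketch (illustration only): pVCpu, GCPtrSrc and the surrounding caller
 * context are assumed; IEM_ACCESS_DATA_R and the sizeof()-1 alignment mask
 * follow the parameter description above.
 *
 *     uint32_t    *pu32Src;
 *     uint8_t      bUnmapInfo;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src),
 *                                       X86_SREG_DS, GCPtrSrc, IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         uint32_t const uValue = *pu32Src;
 *         rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
 *     }
 */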
6914
6915/**
6916 * Commits the guest memory if bounce buffered and unmaps it.
6917 *
6918 * @returns Strict VBox status code.
6919 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6920 * @param bUnmapInfo Unmap info set by iemMemMap.
6921 */
6922VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6923{
6924 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6925 AssertMsgReturn( (bUnmapInfo & 0x08)
6926 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6927 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
6928 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6929 VERR_NOT_FOUND);
6930
6931 /* If it's bounce buffered, we may need to write back the buffer. */
6932 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6933 {
6934 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6935 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6936 }
6937 /* Otherwise unlock it. */
6938 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6939 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6940
6941 /* Free the entry. */
6942 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6943 Assert(pVCpu->iem.s.cActiveMappings != 0);
6944 pVCpu->iem.s.cActiveMappings--;
6945 return VINF_SUCCESS;
6946}
6947
6948
6949/**
6950 * Rolls back the guest memory (conceptually only) and unmaps it.
6951 *
6952 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6953 * @param bUnmapInfo Unmap info set by iemMemMap.
6954 */
6955void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6956{
6957 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6958 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6959 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6960 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6961 == ((unsigned)bUnmapInfo >> 4),
6962 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6963
6964 /* Unlock it if necessary. */
6965 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6966 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6967
6968 /* Free the entry. */
6969 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6970 Assert(pVCpu->iem.s.cActiveMappings != 0);
6971 pVCpu->iem.s.cActiveMappings--;
6972}
6973
6974#ifdef IEM_WITH_SETJMP
6975
6976/**
6977 * Maps the specified guest memory for the given kind of access, longjmp on
6978 * error.
6979 *
6980 * This may be using bounce buffering of the memory if it's crossing a page
6981 * boundary or if there is an access handler installed for any of it. Because
6982 * of lock prefix guarantees, we're in for some extra clutter when this
6983 * happens.
6984 *
6985 * This may raise a \#GP, \#SS, \#PF or \#AC.
6986 *
6987 * @returns Pointer to the mapped memory.
6988 *
6989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6990 * @param bUnmapInfo Where to return unmap info to be passed to
6991 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
6992 * iemMemCommitAndUnmapWoSafeJmp,
6993 * iemMemCommitAndUnmapRoSafeJmp,
6994 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
6995 * when done.
6996 * @param cbMem The number of bytes to map. This is usually 1,
6997 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6998 * string operations it can be up to a page.
6999 * @param iSegReg The index of the segment register to use for
7000 * this access. The base and limits are checked.
7001 * Use UINT8_MAX to indicate that no segmentation
7002 * is required (for IDT, GDT and LDT accesses).
7003 * @param GCPtrMem The address of the guest memory.
7004 * @param fAccess How the memory is being accessed. The
7005 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
7006 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
7007 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
7008 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
7009 * set.
7010 * @param uAlignCtl Alignment control:
7011 * - Bits 15:0 is the alignment mask.
7012 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
7013 * IEM_MEMMAP_F_ALIGN_SSE, and
7014 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
7015 * Pass zero to skip alignment.
7016 * @tparam a_fSafeCall Whether this is a call from a "safe" fallback function in
7017 * IEMAllMemRWTmpl.cpp.h (@c true) or a generic one that
7018 * needs counting as such in the statistics.
7019 */
7020template<bool a_fSafeCall = false>
7021static void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
7022 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
7023{
7024 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapJmp);
7025
7026 /*
7027 * Check the input, check segment access and adjust address
7028 * with segment base.
7029 */
7030 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
7031 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
7032 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7033
7034 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
7035 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
7036 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7037
7038 /*
7039 * Alignment check.
7040 */
7041 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
7042 { /* likelyish */ }
7043 else
7044 {
7045 /* Misaligned access. */
7046 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
7047 {
7048 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
7049 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
7050 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
7051 {
7052 AssertCompile(X86_CR0_AM == X86_EFL_AC);
7053
7054 if (iemMemAreAlignmentChecksEnabled(pVCpu))
7055 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7056 }
7057 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
7058 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
7059 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
7060 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
7061 * that's what FXSAVE does on a 10980xe. */
7062 && iemMemAreAlignmentChecksEnabled(pVCpu))
7063 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7064 else
7065 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
7066 }
7067
7068#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
7069 /* If the access is atomic there are host platform alignment restrictions
7070 we need to conform with. */
7071 if ( !(fAccess & IEM_ACCESS_ATOMIC)
7072# if defined(RT_ARCH_AMD64)
7073 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
7074# elif defined(RT_ARCH_ARM64)
7075 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
7076# else
7077# error port me
7078# endif
7079 )
7080 { /* okay */ }
7081 else
7082 {
7083 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
7084 pVCpu->iem.s.cMisalignedAtomics += 1;
7085 IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
7086 }
7087#endif
7088 }
7089
7090 /*
7091 * Figure out which mapping entry to use.
7092 */
7093 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
7094 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7095 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
7096 {
7097 iMemMap = iemMemMapFindFree(pVCpu);
7098 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
7099 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
7100 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
7101 pVCpu->iem.s.aMemMappings[2].fAccess),
7102 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
7103 }
7104
7105 /*
7106 * Crossing a page boundary?
7107 */
7108 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
7109 { /* No (likely). */ }
7110 else
7111 {
7112 void *pvMem;
7113 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
7114 if (rcStrict == VINF_SUCCESS)
7115 return pvMem;
7116 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7117 }
7118
7119#ifdef IEM_WITH_DATA_TLB
7120 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
7121
7122 /*
7123 * Get the TLB entry for this page checking that it has the A & D bits
7124 * set as per fAccess flags.
7125 */
7126 /** @todo make the caller pass these in with fAccess. */
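 /* These collect the IEMTLBE_F_XXX bits that must be clear in the TLB entry for
 this access to take the fast path; any of them being set diverts us into the
 more careful checks further down. */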
7127 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
7128 ? IEMTLBE_F_PT_NO_USER : 0;
7129 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
7130 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
7131 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
7132 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
7133 ? IEMTLBE_F_PT_NO_WRITE : 0)
7134 : 0;
7135 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
7136 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
7137 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
7138 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY);
7139 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
7140 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
7141 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
7142 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
7143 {
7144# ifdef IEM_WITH_TLB_STATISTICS
7145 if (a_fSafeCall)
7146 pVCpu->iem.s.DataTlb.cTlbSafeHits++;
7147 else
7148 pVCpu->iem.s.DataTlb.cTlbCoreHits++;
7149# endif
7150 }
7151 else
7152 {
7153 if (a_fSafeCall)
7154 pVCpu->iem.s.DataTlb.cTlbSafeMisses++;
7155 else
7156 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
7157
7158 /* This page table walking will set A and D bits as required by the
7159 access while performing the walk.
7160 ASSUMES these are set when the address is translated rather than on commit... */
7161 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7162 PGMPTWALKFAST WalkFast;
7163 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
7164 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
7165 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
7166 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
7167 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
7168 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
7169 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7170 fQPage |= PGMQPAGE_F_USER_MODE;
7171 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
7172 if (RT_SUCCESS(rc))
7173 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
7174 else
7175 {
7176 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
7177# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7178 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
7179 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
7180# endif
7181 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
7182 }
7183
7184 uint32_t fDataBps;
7185 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
7186 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
7187 {
7188 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
7189 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
7190 {
7191 pTlbe--;
7192 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
7193 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
7194 iemTlbLoadedLargePage<false>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
7195 }
7196 else
7197 {
7198 if (a_fSafeCall)
7199 pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads++;
7200 else
7201 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
7202 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
7203 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
7204 iemTlbLoadedLargePage<true>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
7205 }
7206 }
7207 else
7208 {
7209 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
7210 to the page with the data access breakpoint armed on it to pass thru here. */
7211 if (fDataBps > 1)
7212 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp<%d>: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
7213 a_fSafeCall, fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7214 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
7215 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
7216 pTlbe->uTag = uTagNoRev;
7217 }
7218 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
7219 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
7220 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
7221 pTlbe->GCPhys = GCPhysPg;
7222 pTlbe->pbMappingR3 = NULL;
7223 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
7224 Assert(!(pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE));
7225 Assert(!(pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER));
7226
7227 /* Resolve the physical address. */
7228 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
7229 uint8_t *pbMemFullLoad = NULL;
7230 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7231 &pbMemFullLoad, &pTlbe->fFlagsAndPhysRev);
7232 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
7233# ifdef IN_RING3
7234 pTlbe->pbMappingR3 = pbMemFullLoad;
7235# endif
7236 }
7237
7238 /*
7239 * Check the flags and physical revision.
7240 * Note! This will revalidate the uTlbPhysRev after a full load. This is
7241 * just to keep the code structure simple (i.e. avoid gotos or similar).
7242 */
7243 uint8_t *pbMem;
7244 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
7245 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7246# ifdef IN_RING3
7247 pbMem = pTlbe->pbMappingR3;
7248# else
7249 pbMem = NULL;
7250# endif
7251 else
7252 {
7253 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
7254
7255 /*
7256 * Okay, something isn't quite right or needs refreshing.
7257 */
7258 /* Write to read only memory? */
7259 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
7260 {
7261 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7262# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7263/** @todo TLB: EPT isn't integrated into the TLB stuff, so we don't know whether
7264 * to trigger an \#PG or a VM nested paging exit here yet! */
7265 if (Walk.fFailed & PGM_WALKFAIL_EPT)
7266 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
7267# endif
7268 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7269 }
7270
7271 /* Kernel memory accessed by userland? */
7272 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
7273 {
7274 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7275# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7276/** @todo TLB: See above. */
7277 if (Walk.fFailed & PGM_WALKFAIL_EPT)
7278 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
7279# endif
7280 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
7281 }
7282
7283 /*
7284 * Check if the physical page info needs updating.
7285 */
7286 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7287# ifdef IN_RING3
7288 pbMem = pTlbe->pbMappingR3;
7289# else
7290 pbMem = NULL;
7291# endif
7292 else
7293 {
7294 pTlbe->pbMappingR3 = NULL;
7295 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
7296 pbMem = NULL;
7297 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7298 &pbMem, &pTlbe->fFlagsAndPhysRev);
7299 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
7300# ifdef IN_RING3
7301 pTlbe->pbMappingR3 = pbMem;
7302# endif
7303 }
7304
7305 /*
7306 * Check the physical page level access and mapping.
7307 */
7308 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
7309 { /* probably likely */ }
7310 else
7311 {
7312 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
7313 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
7314 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
7315 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
7316 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
7317 if (rcStrict == VINF_SUCCESS)
7318 return pbMem;
7319 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7320 }
7321 }
7322 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
7323
7324 if (pbMem)
7325 {
7326 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
7327 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7328 fAccess |= IEM_ACCESS_NOT_LOCKED;
7329 }
7330 else
7331 {
7332 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
7333 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7334 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7335 if (rcStrict == VINF_SUCCESS)
7336 {
7337 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
7338 return pbMem;
7339 }
7340 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7341 }
7342
7343 void * const pvMem = pbMem;
7344
7345 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7346 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7347 if (fAccess & IEM_ACCESS_TYPE_READ)
7348 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7349
7350#else /* !IEM_WITH_DATA_TLB */
7351
7352
7353 RTGCPHYS GCPhysFirst;
7354 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
7355 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
7356 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7357
7358 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7359 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7360 if (fAccess & IEM_ACCESS_TYPE_READ)
7361 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7362
7363 void *pvMem;
7364 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7365 if (rcStrict == VINF_SUCCESS)
7366 { /* likely */ }
7367 else
7368 {
7369 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
7370 if (rcStrict == VINF_SUCCESS)
7371 return pvMem;
7372 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7373 }
7374
7375#endif /* !IEM_WITH_DATA_TLB */
7376
7377 /*
7378 * Fill in the mapping table entry.
7379 */
7380 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
7381 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
7382 pVCpu->iem.s.iNextMapping = iMemMap + 1;
7383 pVCpu->iem.s.cActiveMappings++;
7384
7385 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
7386
7387 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
7388 return pvMem;
7389}
7390
7391
7392/** @see iemMemMapJmp */
7393static void *iemMemMapSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
7394 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
7395{
7396 return iemMemMapJmp<true /*a_fSafeCall*/>(pVCpu, pbUnmapInfo, cbMem, iSegReg, GCPtrMem, fAccess, uAlignCtl);
7397}
7398
7399
7400/**
7401 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
7402 *
7403 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7404 * @param bUnmapInfo Unmap info set by iemMemMap or iemMemMapJmp
7405 * (mapping index, validity bit and access type).
7406 */
7407void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7408{
7409 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7410 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
7411 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7412 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7413 == ((unsigned)bUnmapInfo >> 4),
7414 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
7415
7416 /* If it's bounce buffered, we may need to write back the buffer. */
7417 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7418 {
7419 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7420 {
7421 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
7422 if (rcStrict == VINF_SUCCESS)
7423 return;
7424 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7425 }
7426 }
7427 /* Otherwise unlock it. */
7428 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7429 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7430
7431 /* Free the entry. */
7432 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7433 Assert(pVCpu->iem.s.cActiveMappings != 0);
7434 pVCpu->iem.s.cActiveMappings--;
7435}
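/* Illustrative only: a minimal sketch of the map/commit protocol around the
   bUnmapInfo byte, assuming a hypothetical caller storing a dword; it mirrors
   the store helpers further down in this file (iemMemStoreDataU128AlignedSseJmp
   and friends):

      uint8_t   bUnmapInfo;
      uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu32Dst), iSegReg, GCPtrMem,
                                                   IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
      *pu32Dst = u32Value;                        // u32Value is a placeholder
      iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);

   The bUnmapInfo byte encodes the mapping index (bits 0..2), a validity flag
   (bit 3) and the access type (bits 4..7), which is what the assertion at the
   top of iemMemCommitAndUnmapJmp checks. */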
7436
7437
7438/** Fallback for iemMemCommitAndUnmapRwJmp. */
7439void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7440{
7441 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7442 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7443}
7444
7445
7446/** Fallback for iemMemCommitAndUnmapAtJmp. */
7447void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7448{
7449 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7450 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7451}
7452
7453
7454/** Fallback for iemMemCommitAndUnmapWoJmp. */
7455void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7456{
7457 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7458 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7459}
7460
7461
7462/** Fallback for iemMemCommitAndUnmapRoJmp. */
7463void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7464{
7465 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
7466 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7467}
7468
7469
7470/** Fallback for iemMemRollbackAndUnmapWo. */
7471void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7472{
7473 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7474 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
7475}
7476
7477#endif /* IEM_WITH_SETJMP */
7478
7479#ifndef IN_RING3
7480/**
7481 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
7482 * buffer part shows trouble, the write is postponed to ring-3 (sets FF and stuff).
7483 *
7484 * Allows the instruction to be completed and retired, while the IEM user will
7485 * return to ring-3 immediately afterwards and do the postponed writes there.
7486 *
7487 * @returns VBox status code (no strict statuses). Caller must check
7488 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
7489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7490 * @param bUnmapInfo Unmap info set by iemMemMap or iemMemMapJmp
7491 * (mapping index, validity bit and access type).
7492 */
7493VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7494{
7495 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7496 AssertMsgReturn( (bUnmapInfo & 0x08)
7497 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7498 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7499 == ((unsigned)bUnmapInfo >> 4),
7500 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
7501 VERR_NOT_FOUND);
7502
7503 /* If it's bounce buffered, we may need to write back the buffer. */
7504 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7505 {
7506 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7507 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
7508 }
7509 /* Otherwise unlock it. */
7510 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7511 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7512
7513 /* Free the entry. */
7514 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7515 Assert(pVCpu->iem.s.cActiveMappings != 0);
7516 pVCpu->iem.s.cActiveMappings--;
7517 return VINF_SUCCESS;
7518}
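/* Illustrative only: a rough sketch of how a non-ring-3 caller might use the
   postponing variant, assuming the VMCPU_FF_IS_SET/VMCPU_FF_IEM macros from the
   VMM headers (anything not defined in this file is an assumption here):

      VBOXSTRICTRC rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, bUnmapInfo);
      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
          fStopRepeating = true;                  // hypothetical flag: let ring-3 flush the postponed writes first

   as per the note above about checking VMCPU_FF_IEM before repeating string
   instructions and similar stuff. */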
7519#endif
7520
7521
7522/**
7523 * Rolls back mappings, releasing page locks and such.
7524 *
7525 * The caller shall only call this after checking cActiveMappings.
7526 *
7527 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7528 */
7529void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7530{
7531 Assert(pVCpu->iem.s.cActiveMappings > 0);
7532
7533 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7534 while (iMemMap-- > 0)
7535 {
7536 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7537 if (fAccess != IEM_ACCESS_INVALID)
7538 {
7539 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7540 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7541 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7542 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7543 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7544 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7545 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7546 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7547 pVCpu->iem.s.cActiveMappings--;
7548 }
7549 }
7550}
7551
7552
7553/*
7554 * Instantiate R/W templates.
7555 */
7556#define TMPL_MEM_WITH_STACK
7557
7558#define TMPL_MEM_TYPE uint8_t
7559#define TMPL_MEM_FN_SUFF U8
7560#define TMPL_MEM_FMT_TYPE "%#04x"
7561#define TMPL_MEM_FMT_DESC "byte"
7562#include "IEMAllMemRWTmpl.cpp.h"
7563
7564#define TMPL_MEM_TYPE uint16_t
7565#define TMPL_MEM_FN_SUFF U16
7566#define TMPL_MEM_FMT_TYPE "%#06x"
7567#define TMPL_MEM_FMT_DESC "word"
7568#include "IEMAllMemRWTmpl.cpp.h"
7569
7570#define TMPL_WITH_PUSH_SREG
7571#define TMPL_MEM_TYPE uint32_t
7572#define TMPL_MEM_FN_SUFF U32
7573#define TMPL_MEM_FMT_TYPE "%#010x"
7574#define TMPL_MEM_FMT_DESC "dword"
7575#include "IEMAllMemRWTmpl.cpp.h"
7576#undef TMPL_WITH_PUSH_SREG
7577
7578#define TMPL_MEM_TYPE uint64_t
7579#define TMPL_MEM_FN_SUFF U64
7580#define TMPL_MEM_FMT_TYPE "%#018RX64"
7581#define TMPL_MEM_FMT_DESC "qword"
7582#include "IEMAllMemRWTmpl.cpp.h"
7583
7584#undef TMPL_MEM_WITH_STACK
7585
7586#define TMPL_MEM_TYPE uint64_t
7587#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7588#define TMPL_MEM_FN_SUFF U64AlignedU128
7589#define TMPL_MEM_FMT_TYPE "%#018RX64"
7590#define TMPL_MEM_FMT_DESC "qword"
7591#include "IEMAllMemRWTmpl.cpp.h"
7592
7593/* See IEMAllMemRWTmplInline.cpp.h */
7594#define TMPL_MEM_BY_REF
7595
7596#define TMPL_MEM_TYPE RTFLOAT80U
7597#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7598#define TMPL_MEM_FN_SUFF R80
7599#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7600#define TMPL_MEM_FMT_DESC "tword"
7601#include "IEMAllMemRWTmpl.cpp.h"
7602
7603#define TMPL_MEM_TYPE RTPBCD80U
7604#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7605#define TMPL_MEM_FN_SUFF D80
7606#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7607#define TMPL_MEM_FMT_DESC "tword"
7608#include "IEMAllMemRWTmpl.cpp.h"
7609
7610#define TMPL_MEM_TYPE RTUINT128U
7611#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7612#define TMPL_MEM_FN_SUFF U128
7613#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7614#define TMPL_MEM_FMT_DESC "dqword"
7615#include "IEMAllMemRWTmpl.cpp.h"
7616
7617#define TMPL_MEM_TYPE RTUINT128U
7618#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7619#define TMPL_MEM_MAP_FLAGS_ADD (IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE)
7620#define TMPL_MEM_FN_SUFF U128AlignedSse
7621#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7622#define TMPL_MEM_FMT_DESC "dqword"
7623#include "IEMAllMemRWTmpl.cpp.h"
7624
7625#define TMPL_MEM_TYPE RTUINT128U
7626#define TMPL_MEM_TYPE_ALIGN 0
7627#define TMPL_MEM_FN_SUFF U128NoAc
7628#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7629#define TMPL_MEM_FMT_DESC "dqword"
7630#include "IEMAllMemRWTmpl.cpp.h"
7631
7632#define TMPL_MEM_TYPE RTUINT256U
7633#define TMPL_MEM_TYPE_ALIGN 0
7634#define TMPL_MEM_FN_SUFF U256NoAc
7635#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7636#define TMPL_MEM_FMT_DESC "qqword"
7637#include "IEMAllMemRWTmpl.cpp.h"
7638
7639#define TMPL_MEM_TYPE RTUINT256U
7640#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT256U) - 1)
7641#define TMPL_MEM_MAP_FLAGS_ADD IEM_MEMMAP_F_ALIGN_GP
7642#define TMPL_MEM_FN_SUFF U256AlignedAvx
7643#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7644#define TMPL_MEM_FMT_DESC "qqword"
7645#include "IEMAllMemRWTmpl.cpp.h"
7646
7647/**
7648 * Fetches a data dword and zero extends it to a qword.
7649 *
7650 * @returns Strict VBox status code.
7651 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7652 * @param pu64Dst Where to return the qword.
7653 * @param iSegReg The index of the segment register to use for
7654 * this access. The base and limits are checked.
7655 * @param GCPtrMem The address of the guest memory.
7656 */
7657VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7658{
7659 /* The lazy approach for now... */
7660 uint8_t bUnmapInfo;
7661 uint32_t const *pu32Src;
7662 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7663 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7664 if (rc == VINF_SUCCESS)
7665 {
7666 *pu64Dst = *pu32Src;
7667 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7668 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7669 }
7670 return rc;
7671}
7672
7673
7674#ifdef SOME_UNUSED_FUNCTION
7675/**
7676 * Fetches a data dword and sign extends it to a qword.
7677 *
7678 * @returns Strict VBox status code.
7679 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7680 * @param pu64Dst Where to return the sign extended value.
7681 * @param iSegReg The index of the segment register to use for
7682 * this access. The base and limits are checked.
7683 * @param GCPtrMem The address of the guest memory.
7684 */
7685VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7686{
7687 /* The lazy approach for now... */
7688 uint8_t bUnmapInfo;
7689 int32_t const *pi32Src;
7690 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
7691 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7692 if (rc == VINF_SUCCESS)
7693 {
7694 *pu64Dst = *pi32Src;
7695 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7696 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7697 }
7698#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7699 else
7700 *pu64Dst = 0;
7701#endif
7702 return rc;
7703}
7704#endif
7705
7706
7707/**
7708 * Fetches a descriptor register (lgdt, lidt).
7709 *
7710 * @returns Strict VBox status code.
7711 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7712 * @param pcbLimit Where to return the limit.
7713 * @param pGCPtrBase Where to return the base.
7714 * @param iSegReg The index of the segment register to use for
7715 * this access. The base and limits are checked.
7716 * @param GCPtrMem The address of the guest memory.
7717 * @param enmOpSize The effective operand size.
7718 */
7719VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7720 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7721{
7722 /*
7723 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7724 * little special:
7725 * - The two reads are done separately.
7726 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7727 * - We suspect the 386 to actually commit the limit before the base in
7728 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7729 * don't try to emulate this eccentric behavior, because it's not well
7730 * enough understood and rather hard to trigger.
7731 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7732 */
7733 VBOXSTRICTRC rcStrict;
7734 if (IEM_IS_64BIT_CODE(pVCpu))
7735 {
7736 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7737 if (rcStrict == VINF_SUCCESS)
7738 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7739 }
7740 else
7741 {
7742 uint32_t uTmp = 0; /* (silence a Visual C++ 'maybe used uninitialized' warning) */
7743 if (enmOpSize == IEMMODE_32BIT)
7744 {
7745 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7746 {
7747 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7748 if (rcStrict == VINF_SUCCESS)
7749 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7750 }
7751 else
7752 {
7753 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7754 if (rcStrict == VINF_SUCCESS)
7755 {
7756 *pcbLimit = (uint16_t)uTmp;
7757 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7758 }
7759 }
7760 if (rcStrict == VINF_SUCCESS)
7761 *pGCPtrBase = uTmp;
7762 }
7763 else
7764 {
7765 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7766 if (rcStrict == VINF_SUCCESS)
7767 {
7768 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7769 if (rcStrict == VINF_SUCCESS)
7770 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7771 }
7772 }
7773 }
7774 return rcStrict;
7775}
7776
7777
7778/**
7779 * Stores a data dqword, SSE aligned.
7780 *
7781 * @returns Strict VBox status code.
7782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7783 * @param iSegReg The index of the segment register to use for
7784 * this access. The base and limits are checked.
7785 * @param GCPtrMem The address of the guest memory.
7786 * @param u128Value The value to store.
7787 */
7788VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7789{
7790 /* The lazy approach for now... */
7791 uint8_t bUnmapInfo;
7792 PRTUINT128U pu128Dst;
7793 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7794 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7795 if (rc == VINF_SUCCESS)
7796 {
7797 pu128Dst->au64[0] = u128Value.au64[0];
7798 pu128Dst->au64[1] = u128Value.au64[1];
7799 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7800 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7801 }
7802 return rc;
7803}
7804
7805
7806#ifdef IEM_WITH_SETJMP
7807/**
7808 * Stores a data dqword, SSE aligned.
7809 *
7810 * @returns Strict VBox status code.
7811 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7812 * @param iSegReg The index of the segment register to use for
7813 * this access. The base and limits are checked.
7814 * @param GCPtrMem The address of the guest memory.
7815 * @param u128Value The value to store.
7816 */
7817void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7818 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7819{
7820 /* The lazy approach for now... */
7821 uint8_t bUnmapInfo;
7822 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7823 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7824 pu128Dst->au64[0] = u128Value.au64[0];
7825 pu128Dst->au64[1] = u128Value.au64[1];
7826 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7827 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7828}
7829#endif
7830
7831
7832/**
7833 * Stores a data qqword.
7834 *
7835 * @returns Strict VBox status code.
7836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7837 * @param iSegReg The index of the segment register to use for
7838 * this access. The base and limits are checked.
7839 * @param GCPtrMem The address of the guest memory.
7840 * @param pu256Value Pointer to the value to store.
7841 */
7842VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7843{
7844 /* The lazy approach for now... */
7845 uint8_t bUnmapInfo;
7846 PRTUINT256U pu256Dst;
7847 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7848 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7849 if (rc == VINF_SUCCESS)
7850 {
7851 pu256Dst->au64[0] = pu256Value->au64[0];
7852 pu256Dst->au64[1] = pu256Value->au64[1];
7853 pu256Dst->au64[2] = pu256Value->au64[2];
7854 pu256Dst->au64[3] = pu256Value->au64[3];
7855 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7856 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7857 }
7858 return rc;
7859}
7860
7861
7862#ifdef IEM_WITH_SETJMP
7863/**
7864 * Stores a data qqword, longjmp on error.
7865 *
7866 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7867 * @param iSegReg The index of the segment register to use for
7868 * this access. The base and limits are checked.
7869 * @param GCPtrMem The address of the guest memory.
7870 * @param pu256Value Pointer to the value to store.
7871 */
7872void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7873{
7874 /* The lazy approach for now... */
7875 uint8_t bUnmapInfo;
7876 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7877 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7878 pu256Dst->au64[0] = pu256Value->au64[0];
7879 pu256Dst->au64[1] = pu256Value->au64[1];
7880 pu256Dst->au64[2] = pu256Value->au64[2];
7881 pu256Dst->au64[3] = pu256Value->au64[3];
7882 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7883 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7884}
7885#endif
7886
7887
7888/**
7889 * Stores a descriptor register (sgdt, sidt).
7890 *
7891 * @returns Strict VBox status code.
7892 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7893 * @param cbLimit The limit.
7894 * @param GCPtrBase The base address.
7895 * @param iSegReg The index of the segment register to use for
7896 * this access. The base and limits are checked.
7897 * @param GCPtrMem The address of the guest memory.
7898 */
7899VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7900{
7901 /*
7902 * The SIDT and SGDT instructions actually store the data using two
7903 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7904 * do not respond to operand size (opsize) prefixes.
7905 */
7906 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7907 if (rcStrict == VINF_SUCCESS)
7908 {
7909 if (IEM_IS_16BIT_CODE(pVCpu))
7910 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7911 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7912 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7913 else if (IEM_IS_32BIT_CODE(pVCpu))
7914 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7915 else
7916 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7917 }
7918 return rcStrict;
7919}
7920
7921
7922/**
7923 * Begin a special stack push (used by interrupts, exceptions and such).
7924 *
7925 * This will raise \#SS or \#PF if appropriate.
7926 *
7927 * @returns Strict VBox status code.
7928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7929 * @param cbMem The number of bytes to push onto the stack.
7930 * @param cbAlign The alignment mask (7, 3, 1).
7931 * @param ppvMem Where to return the pointer to the stack memory.
7932 * As with the other memory functions this could be
7933 * direct access or bounce buffered access, so
7934 * don't commit registers until the commit call
7935 * succeeds.
7936 * @param pbUnmapInfo Where to store unmap info for
7937 * iemMemStackPushCommitSpecial.
7938 * @param puNewRsp Where to return the new RSP value. This must be
7939 * passed unchanged to
7940 * iemMemStackPushCommitSpecial().
7941 */
7942VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7943 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7944{
7945 Assert(cbMem < UINT8_MAX);
7946 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7947 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
7948}
7949
7950
7951/**
7952 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7953 *
7954 * This will update the rSP.
7955 *
7956 * @returns Strict VBox status code.
7957 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7958 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
7959 * @param uNewRsp The new RSP value returned by
7960 * iemMemStackPushBeginSpecial().
7961 */
7962VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
7963{
7964 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7965 if (rcStrict == VINF_SUCCESS)
7966 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7967 return rcStrict;
7968}
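/* Illustrative only: how an exception/interrupt pusher would typically use the
   special push pair above, assuming a hypothetical 8-byte frame with 8-byte
   alignment (uValueToPush is a placeholder):

      void    *pvFrame;
      uint8_t  bUnmapInfo;
      uint64_t uNewRsp;
      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, 7, &pvFrame, &bUnmapInfo, &uNewRsp); // cbAlign=7
      if (rcStrict != VINF_SUCCESS)
          return rcStrict;
      *(uint64_t *)pvFrame = uValueToPush;        // fill in the frame via the mapping
      rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp); // commits and updates RSP

   No register should be committed before the commit call succeeds, since the
   mapping may be bounce buffered. */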
7969
7970
7971/**
7972 * Begin a special stack pop (used by iret, retf and such).
7973 *
7974 * This will raise \#SS or \#PF if appropriate.
7975 *
7976 * @returns Strict VBox status code.
7977 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7978 * @param cbMem The number of bytes to pop from the stack.
7979 * @param cbAlign The alignment mask (7, 3, 1).
7980 * @param ppvMem Where to return the pointer to the stack memory.
7981 * @param pbUnmapInfo Where to store unmap info for
7982 * iemMemStackPopDoneSpecial.
7983 * @param puNewRsp Where to return the new RSP value. This must be
7984 * assigned to CPUMCTX::rsp manually some time
7985 * after iemMemStackPopDoneSpecial() has been
7986 * called.
7987 */
7988VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7989 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7990{
7991 Assert(cbMem < UINT8_MAX);
7992 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
7993 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
7994}
7995
7996
7997/**
7998 * Continue a special stack pop (used by iret and retf), for the purpose of
7999 * retrieving a new stack pointer.
8000 *
8001 * This will raise \#SS or \#PF if appropriate.
8002 *
8003 * @returns Strict VBox status code.
8004 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8005 * @param off Offset from the top of the stack. This is zero
8006 * except in the retf case.
8007 * @param cbMem The number of bytes to pop from the stack.
8008 * @param ppvMem Where to return the pointer to the stack memory.
8009 * @param pbUnmapInfo Where to store unmap info for
8010 * iemMemStackPopDoneSpecial.
8011 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8012 * return this because all use of this function is
8013 * to retrieve a new value and anything we return
8014 * here would be discarded.)
8015 */
8016VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8017 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
8018{
8019 Assert(cbMem < UINT8_MAX);
8020
8021 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8022 RTGCPTR GCPtrTop;
8023 if (IEM_IS_64BIT_CODE(pVCpu))
8024 GCPtrTop = uCurNewRsp;
8025 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8026 GCPtrTop = (uint32_t)uCurNewRsp;
8027 else
8028 GCPtrTop = (uint16_t)uCurNewRsp;
8029
8030 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8031 0 /* checked in iemMemStackPopBeginSpecial */);
8032}
8033
8034
8035/**
8036 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8037 * iemMemStackPopContinueSpecial).
8038 *
8039 * The caller will manually commit the rSP.
8040 *
8041 * @returns Strict VBox status code.
8042 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8043 * @param bUnmapInfo Unmap information returned by
8044 * iemMemStackPopBeginSpecial() or
8045 * iemMemStackPopContinueSpecial().
8046 */
8047VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
8048{
8049 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8050}
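/* Illustrative only: a sketch of the special pop protocol used by iret/retf,
   assuming a hypothetical 8-byte pop:

      void const *pvFrame;
      uint8_t     bUnmapInfo;
      uint64_t    uNewRsp;
      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, 7, &pvFrame, &bUnmapInfo, &uNewRsp); // cbAlign=7
      if (rcStrict != VINF_SUCCESS)
          return rcStrict;
      uint64_t const uPopped = *(uint64_t const *)pvFrame;
      rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
      if (rcStrict == VINF_SUCCESS)
          pVCpu->cpum.GstCtx.rsp = uNewRsp;       // rSP is committed manually, not by the Done call
*/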
8051
8052
8053/**
8054 * Fetches a system table byte.
8055 *
8056 * @returns Strict VBox status code.
8057 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8058 * @param pbDst Where to return the byte.
8059 * @param iSegReg The index of the segment register to use for
8060 * this access. The base and limits are checked.
8061 * @param GCPtrMem The address of the guest memory.
8062 */
8063VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8064{
8065 /* The lazy approach for now... */
8066 uint8_t bUnmapInfo;
8067 uint8_t const *pbSrc;
8068 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8069 if (rc == VINF_SUCCESS)
8070 {
8071 *pbDst = *pbSrc;
8072 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8073 }
8074 return rc;
8075}
8076
8077
8078/**
8079 * Fetches a system table word.
8080 *
8081 * @returns Strict VBox status code.
8082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8083 * @param pu16Dst Where to return the word.
8084 * @param iSegReg The index of the segment register to use for
8085 * this access. The base and limits are checked.
8086 * @param GCPtrMem The address of the guest memory.
8087 */
8088VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8089{
8090 /* The lazy approach for now... */
8091 uint8_t bUnmapInfo;
8092 uint16_t const *pu16Src;
8093 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8094 if (rc == VINF_SUCCESS)
8095 {
8096 *pu16Dst = *pu16Src;
8097 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8098 }
8099 return rc;
8100}
8101
8102
8103/**
8104 * Fetches a system table dword.
8105 *
8106 * @returns Strict VBox status code.
8107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8108 * @param pu32Dst Where to return the dword.
8109 * @param iSegReg The index of the segment register to use for
8110 * this access. The base and limits are checked.
8111 * @param GCPtrMem The address of the guest memory.
8112 */
8113VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8114{
8115 /* The lazy approach for now... */
8116 uint8_t bUnmapInfo;
8117 uint32_t const *pu32Src;
8118 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8119 if (rc == VINF_SUCCESS)
8120 {
8121 *pu32Dst = *pu32Src;
8122 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8123 }
8124 return rc;
8125}
8126
8127
8128/**
8129 * Fetches a system table qword.
8130 *
8131 * @returns Strict VBox status code.
8132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8133 * @param pu64Dst Where to return the qword.
8134 * @param iSegReg The index of the segment register to use for
8135 * this access. The base and limits are checked.
8136 * @param GCPtrMem The address of the guest memory.
8137 */
8138VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8139{
8140 /* The lazy approach for now... */
8141 uint8_t bUnmapInfo;
8142 uint64_t const *pu64Src;
8143 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8144 if (rc == VINF_SUCCESS)
8145 {
8146 *pu64Dst = *pu64Src;
8147 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8148 }
8149 return rc;
8150}
8151
8152
8153/**
8154 * Fetches a descriptor table entry with caller specified error code.
8155 *
8156 * @returns Strict VBox status code.
8157 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8158 * @param pDesc Where to return the descriptor table entry.
8159 * @param uSel The selector which table entry to fetch.
8160 * @param uXcpt The exception to raise on table lookup error.
8161 * @param uErrorCode The error code associated with the exception.
8162 */
8163static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8164 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8165{
8166 AssertPtr(pDesc);
8167 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8168
8169 /** @todo did the 286 require all 8 bytes to be accessible? */
8170 /*
8171 * Get the selector table base and check bounds.
8172 */
8173 RTGCPTR GCPtrBase;
8174 if (uSel & X86_SEL_LDT)
8175 {
8176 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8177 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8178 {
8179 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8180 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8181 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8182 uErrorCode, 0);
8183 }
8184
8185 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8186 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8187 }
8188 else
8189 {
8190 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8191 {
8192 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8193 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8194 uErrorCode, 0);
8195 }
8196 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8197 }
8198
8199 /*
8200 * Read the legacy descriptor and maybe the long mode extensions if
8201 * required.
8202 */
8203 VBOXSTRICTRC rcStrict;
8204 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8205 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8206 else
8207 {
8208 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8209 if (rcStrict == VINF_SUCCESS)
8210 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8211 if (rcStrict == VINF_SUCCESS)
8212 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8213 if (rcStrict == VINF_SUCCESS)
8214 pDesc->Legacy.au16[3] = 0;
8215 else
8216 return rcStrict;
8217 }
8218
8219 if (rcStrict == VINF_SUCCESS)
8220 {
8221 if ( !IEM_IS_LONG_MODE(pVCpu)
8222 || pDesc->Legacy.Gen.u1DescType)
8223 pDesc->Long.au64[1] = 0;
8224 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8225 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8226 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8227 else
8228 {
8229 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8230 /** @todo is this the right exception? */
8231 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8232 }
8233 }
8234 return rcStrict;
8235}
8236
8237
8238/**
8239 * Fetches a descriptor table entry.
8240 *
8241 * @returns Strict VBox status code.
8242 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8243 * @param pDesc Where to return the descriptor table entry.
8244 * @param uSel The selector which table entry to fetch.
8245 * @param uXcpt The exception to raise on table lookup error.
8246 */
8247VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8248{
8249 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8250}
8251
8252
8253/**
8254 * Marks the selector descriptor as accessed (only non-system descriptors).
8255 *
8256 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8257 * will therefore skip the limit checks.
8258 *
8259 * @returns Strict VBox status code.
8260 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8261 * @param uSel The selector.
8262 */
8263VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8264{
8265 /*
8266 * Get the selector table base and calculate the entry address.
8267 */
8268 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8269 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8270 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8271 GCPtr += uSel & X86_SEL_MASK;
8272
8273 /*
8274 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8275 * ugly stuff to avoid this. This will make sure it's an atomic access
8276 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8277 */
8278 VBOXSTRICTRC rcStrict;
8279 uint8_t bUnmapInfo;
8280 uint32_t volatile *pu32;
8281 if ((GCPtr & 3) == 0)
8282 {
8283 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8284 GCPtr += 2 + 2;
8285 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8286 if (rcStrict != VINF_SUCCESS)
8287 return rcStrict;
8288 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8289 }
8290 else
8291 {
8292 /* The misaligned GDT/LDT case, map the whole thing. */
8293 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8294 if (rcStrict != VINF_SUCCESS)
8295 return rcStrict;
8296 switch ((uintptr_t)pu32 & 3)
8297 {
8298 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8299 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8300 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8301 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8302 }
8303 }
8304
8305 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8306}
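/* Note (illustrative arithmetic): X86_SEL_TYPE_ACCESSED is bit 0 of descriptor
   byte 5, i.e. bit 40 of the 8-byte descriptor.  In the aligned case only the
   dword at offset 4 is mapped, so the bit ends up at 40 - 32 = 8, matching the
   ASMAtomicBitSet(pu32, 8) call above.  In the misaligned case the whole
   descriptor is mapped and the byte pointer is advanced to a dword boundary,
   reducing the bit index by 8 bits per byte skipped (the 40 - 0/8/16/24 terms). */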
8307
8308
8309#undef LOG_GROUP
8310#define LOG_GROUP LOG_GROUP_IEM
8311
8312/** @} */
8313
8314/** @name Opcode Helpers.
8315 * @{
8316 */
8317
8318/**
8319 * Calculates the effective address of a ModR/M memory operand.
8320 *
8321 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8322 *
8323 * @return Strict VBox status code.
8324 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8325 * @param bRm The ModRM byte.
8326 * @param cbImmAndRspOffset - First byte: The size of any immediate
8327 * following the effective address opcode bytes
8328 * (only for RIP relative addressing).
8329 * - Second byte: RSP displacement (for POP [ESP]).
8330 * @param pGCPtrEff Where to return the effective address.
8331 */
8332VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8333{
8334 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8335# define SET_SS_DEF() \
8336 do \
8337 { \
8338 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8339 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8340 } while (0)
8341
8342 if (!IEM_IS_64BIT_CODE(pVCpu))
8343 {
8344/** @todo Check the effective address size crap! */
8345 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8346 {
8347 uint16_t u16EffAddr;
8348
8349 /* Handle the disp16 form with no registers first. */
8350 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8351 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8352 else
8353 {
8354 /* Get the displacement. */
8355 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8356 {
8357 case 0: u16EffAddr = 0; break;
8358 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8359 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8360 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8361 }
8362
8363 /* Add the base and index registers to the disp. */
8364 switch (bRm & X86_MODRM_RM_MASK)
8365 {
8366 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8367 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8368 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8369 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8370 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8371 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8372 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8373 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8374 }
8375 }
8376
8377 *pGCPtrEff = u16EffAddr;
8378 }
8379 else
8380 {
8381 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8382 uint32_t u32EffAddr;
8383
8384 /* Handle the disp32 form with no registers first. */
8385 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8386 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8387 else
8388 {
8389 /* Get the register (or SIB) value. */
8390 switch ((bRm & X86_MODRM_RM_MASK))
8391 {
8392 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8393 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8394 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8395 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8396 case 4: /* SIB */
8397 {
8398 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8399
8400 /* Get the index and scale it. */
8401 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8402 {
8403 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8404 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8405 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8406 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8407 case 4: u32EffAddr = 0; /*none */ break;
8408 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8409 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8410 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8411 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8412 }
8413 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8414
8415 /* add base */
8416 switch (bSib & X86_SIB_BASE_MASK)
8417 {
8418 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8419 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8420 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8421 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8422 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8423 case 5:
8424 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8425 {
8426 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8427 SET_SS_DEF();
8428 }
8429 else
8430 {
8431 uint32_t u32Disp;
8432 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8433 u32EffAddr += u32Disp;
8434 }
8435 break;
8436 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8437 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8438 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8439 }
8440 break;
8441 }
8442 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8443 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8444 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8445 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8446 }
8447
8448 /* Get and add the displacement. */
8449 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8450 {
8451 case 0:
8452 break;
8453 case 1:
8454 {
8455 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8456 u32EffAddr += i8Disp;
8457 break;
8458 }
8459 case 2:
8460 {
8461 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8462 u32EffAddr += u32Disp;
8463 break;
8464 }
8465 default:
8466 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8467 }
8468
8469 }
8470 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8471 *pGCPtrEff = u32EffAddr;
8472 }
8473 }
8474 else
8475 {
8476 uint64_t u64EffAddr;
8477
8478 /* Handle the rip+disp32 form with no registers first. */
8479 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8480 {
8481 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8482 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8483 }
8484 else
8485 {
8486 /* Get the register (or SIB) value. */
8487 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8488 {
8489 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8490 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8491 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8492 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8493 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8494 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8495 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8496 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8497 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8498 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8499 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8500 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8501 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8502 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8503 /* SIB */
8504 case 4:
8505 case 12:
8506 {
8507 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8508
8509 /* Get the index and scale it. */
8510 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8511 {
8512 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8513 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8514 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8515 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8516 case 4: u64EffAddr = 0; /*none */ break;
8517 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8518 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8519 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8520 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8521 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8522 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8523 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8524 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8525 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8526 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8527 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8528 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8529 }
8530 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8531
8532 /* add base */
8533 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8534 {
8535 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8536 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8537 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8538 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8539 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8540 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8541 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8542 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8543 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8544 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8545 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8546 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8547 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8548 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8549 /* complicated encodings */
8550 case 5:
8551 case 13:
8552 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8553 {
8554 if (!pVCpu->iem.s.uRexB)
8555 {
8556 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8557 SET_SS_DEF();
8558 }
8559 else
8560 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8561 }
8562 else
8563 {
8564 uint32_t u32Disp;
8565 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8566 u64EffAddr += (int32_t)u32Disp;
8567 }
8568 break;
8569 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8570 }
8571 break;
8572 }
8573 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8574 }
8575
8576 /* Get and add the displacement. */
8577 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8578 {
8579 case 0:
8580 break;
8581 case 1:
8582 {
8583 int8_t i8Disp;
8584 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8585 u64EffAddr += i8Disp;
8586 break;
8587 }
8588 case 2:
8589 {
8590 uint32_t u32Disp;
8591 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8592 u64EffAddr += (int32_t)u32Disp;
8593 break;
8594 }
8595 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8596 }
8597
8598 }
8599
8600 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8601 *pGCPtrEff = u64EffAddr;
8602 else
8603 {
8604 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8605 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8606 }
8607 }
8608
8609 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8610 return VINF_SUCCESS;
8611}
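/* Illustrative only: a worked 32-bit decode for the function above, with
   hypothetical values.  bRm=0x84 gives mod=2, rm=4, i.e. a SIB byte plus a
   32-bit displacement; a SIB byte of 0x88 gives scale=4, index=ECX, base=EAX,
   so the result is

      *pGCPtrEff = EAX + ECX * 4 + disp32;

   with DS remaining the default segment.  Had the base been EBP (with mod != 0)
   or ESP, SET_SS_DEF() would have switched the default segment to SS. */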
8612
8613
8614#ifdef IEM_WITH_SETJMP
8615/**
8616 * Calculates the effective address of a ModR/M memory operand.
8617 *
8618 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8619 *
8620 * May longjmp on internal error.
8621 *
8622 * @return The effective address.
8623 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8624 * @param bRm The ModRM byte.
8625 * @param cbImmAndRspOffset - First byte: The size of any immediate
8626 * following the effective address opcode bytes
8627 * (only for RIP relative addressing).
8628 * - Second byte: RSP displacement (for POP [ESP]).
8629 */
8630RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8631{
8632 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8633# define SET_SS_DEF() \
8634 do \
8635 { \
8636 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8637 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8638 } while (0)
8639
8640 if (!IEM_IS_64BIT_CODE(pVCpu))
8641 {
8642/** @todo Check the effective address size crap! */
8643 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8644 {
8645 uint16_t u16EffAddr;
8646
8647 /* Handle the disp16 form with no registers first. */
8648 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8649 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8650 else
8651 {
8652 /* Get the displacement. */
8653 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8654 {
8655 case 0: u16EffAddr = 0; break;
8656 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8657 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8658 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8659 }
8660
8661 /* Add the base and index registers to the disp. */
8662 switch (bRm & X86_MODRM_RM_MASK)
8663 {
8664 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8665 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8666 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8667 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8668 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8669 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8670 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8671 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8672 }
8673 }
8674
8675 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8676 return u16EffAddr;
8677 }
8678
8679 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8680 uint32_t u32EffAddr;
8681
8682 /* Handle the disp32 form with no registers first. */
8683 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8684 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8685 else
8686 {
8687 /* Get the register (or SIB) value. */
8688 switch ((bRm & X86_MODRM_RM_MASK))
8689 {
8690 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8691 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8692 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8693 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8694 case 4: /* SIB */
8695 {
8696 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8697
8698 /* Get the index and scale it. */
8699 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8700 {
8701 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8702 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8703 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8704 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8705 case 4: u32EffAddr = 0; /*none */ break;
8706 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8707 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8708 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8709 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8710 }
8711 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8712
8713 /* add base */
8714 switch (bSib & X86_SIB_BASE_MASK)
8715 {
8716 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8717 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8718 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8719 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8720 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8721 case 5:
8722 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8723 {
8724 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8725 SET_SS_DEF();
8726 }
8727 else
8728 {
8729 uint32_t u32Disp;
8730 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8731 u32EffAddr += u32Disp;
8732 }
8733 break;
8734 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8735 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8736 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8737 }
8738 break;
8739 }
8740 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8741 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8742 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8743 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8744 }
8745
8746 /* Get and add the displacement. */
8747 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8748 {
8749 case 0:
8750 break;
8751 case 1:
8752 {
8753 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8754 u32EffAddr += i8Disp;
8755 break;
8756 }
8757 case 2:
8758 {
8759 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8760 u32EffAddr += u32Disp;
8761 break;
8762 }
8763 default:
8764 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8765 }
8766 }
8767
8768 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8769 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8770 return u32EffAddr;
8771 }
8772
8773 uint64_t u64EffAddr;
8774
8775 /* Handle the rip+disp32 form with no registers first. */
8776 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8777 {
8778 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8779 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8780 }
8781 else
8782 {
8783 /* Get the register (or SIB) value. */
8784 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8785 {
8786 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8787 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8788 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8789 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8790 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8791 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8792 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8793 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8794 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8795 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8796 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8797 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8798 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8799 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8800 /* SIB */
8801 case 4:
8802 case 12:
8803 {
8804 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8805
8806 /* Get the index and scale it. */
8807 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8808 {
8809 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8810 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8811 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8812 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8813 case 4: u64EffAddr = 0; /*none */ break;
8814 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8815 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8816 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8817 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8818 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8819 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8820 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8821 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8822 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8823 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8824 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8825 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8826 }
8827 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8828
8829 /* add base */
8830 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8831 {
8832 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8833 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8834 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8835 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8836 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8837 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8838 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8839 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8840 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8841 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8842 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8843 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8844 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8845 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8846 /* complicated encodings */
8847 case 5:
8848 case 13:
8849 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8850 {
8851 if (!pVCpu->iem.s.uRexB)
8852 {
8853 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8854 SET_SS_DEF();
8855 }
8856 else
8857 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8858 }
8859 else
8860 {
8861 uint32_t u32Disp;
8862 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8863 u64EffAddr += (int32_t)u32Disp;
8864 }
8865 break;
8866 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8867 }
8868 break;
8869 }
8870 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8871 }
8872
8873 /* Get and add the displacement. */
8874 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8875 {
8876 case 0:
8877 break;
8878 case 1:
8879 {
8880 int8_t i8Disp;
8881 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8882 u64EffAddr += i8Disp;
8883 break;
8884 }
8885 case 2:
8886 {
8887 uint32_t u32Disp;
8888 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8889 u64EffAddr += (int32_t)u32Disp;
8890 break;
8891 }
8892 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8893 }
8894
8895 }
8896
8897 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8898 {
8899 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8900 return u64EffAddr;
8901 }
8902 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8903 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8904 return u64EffAddr & UINT32_MAX;
8905}
8906#endif /* IEM_WITH_SETJMP */
8907
8908
8909/**
8910 * Calculates the effective address of a ModR/M memory operand, extended version
8911 * for use in the recompilers.
8912 *
8913 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8914 *
8915 * @return Strict VBox status code.
8916 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8917 * @param bRm The ModRM byte.
8918 * @param cbImmAndRspOffset - First byte: The size of any immediate
8919 * following the effective address opcode bytes
8920 * (only for RIP relative addressing).
8921 * - Second byte: RSP displacement (for POP [ESP]).
8922 * @param pGCPtrEff Where to return the effective address.
8923 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8924 * SIB byte (bits 39:32).
8925 */
8926VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8927{
8928 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8929# define SET_SS_DEF() \
8930 do \
8931 { \
8932 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8933 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8934 } while (0)
8935
8936 uint64_t uInfo;
8937 if (!IEM_IS_64BIT_CODE(pVCpu))
8938 {
8939/** @todo Check the effective address size crap! */
8940 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8941 {
8942 uint16_t u16EffAddr;
8943
8944 /* Handle the disp16 form with no registers first. */
8945 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8946 {
8947 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8948 uInfo = u16EffAddr;
8949 }
8950 else
8951 {
8952 /* Get the displacement. */
8953 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8954 {
8955 case 0: u16EffAddr = 0; break;
8956 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8957 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8958 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8959 }
8960 uInfo = u16EffAddr;
8961
8962 /* Add the base and index registers to the disp. */
8963 switch (bRm & X86_MODRM_RM_MASK)
8964 {
8965 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8966 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8967 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8968 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8969 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8970 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8971 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8972 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8973 }
8974 }
8975
8976 *pGCPtrEff = u16EffAddr;
8977 }
8978 else
8979 {
8980 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8981 uint32_t u32EffAddr;
8982
8983 /* Handle the disp32 form with no registers first. */
8984 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8985 {
8986 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8987 uInfo = u32EffAddr;
8988 }
8989 else
8990 {
8991 /* Get the register (or SIB) value. */
8992 uInfo = 0;
8993 switch ((bRm & X86_MODRM_RM_MASK))
8994 {
8995 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8996 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8997 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8998 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8999 case 4: /* SIB */
9000 {
9001 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9002 uInfo = (uint64_t)bSib << 32;
9003
9004 /* Get the index and scale it. */
9005 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9006 {
9007 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9008 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9009 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9010 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9011 case 4: u32EffAddr = 0; /*none */ break;
9012 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9013 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9014 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9015 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9016 }
9017 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9018
9019 /* add base */
9020 switch (bSib & X86_SIB_BASE_MASK)
9021 {
9022 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9023 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9024 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9025 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9026 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9027 case 5:
9028 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9029 {
9030 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9031 SET_SS_DEF();
9032 }
9033 else
9034 {
9035 uint32_t u32Disp;
9036 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9037 u32EffAddr += u32Disp;
9038 uInfo |= u32Disp;
9039 }
9040 break;
9041 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9042 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9043 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9044 }
9045 break;
9046 }
9047 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9048 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9049 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9050 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9051 }
9052
9053 /* Get and add the displacement. */
9054 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9055 {
9056 case 0:
9057 break;
9058 case 1:
9059 {
9060 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9061 u32EffAddr += i8Disp;
9062 uInfo |= (uint32_t)(int32_t)i8Disp;
9063 break;
9064 }
9065 case 2:
9066 {
9067 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9068 u32EffAddr += u32Disp;
9069 uInfo |= (uint32_t)u32Disp;
9070 break;
9071 }
9072 default:
9073 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9074 }
9075
9076 }
9077 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9078 *pGCPtrEff = u32EffAddr;
9079 }
9080 }
9081 else
9082 {
9083 uint64_t u64EffAddr;
9084
9085 /* Handle the rip+disp32 form with no registers first. */
9086 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9087 {
9088 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9089 uInfo = (uint32_t)u64EffAddr;
9090 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9091 }
9092 else
9093 {
9094 /* Get the register (or SIB) value. */
9095 uInfo = 0;
9096 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9097 {
9098 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9099 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9100 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9101 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9102 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9103 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9104 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9105 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9106 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9107 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9108 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9109 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9110 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9111 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9112 /* SIB */
9113 case 4:
9114 case 12:
9115 {
9116 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9117 uInfo = (uint64_t)bSib << 32;
9118
9119 /* Get the index and scale it. */
9120 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9121 {
9122 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9123 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9124 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9125 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9126 case 4: u64EffAddr = 0; /*none */ break;
9127 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9128 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9129 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9130 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9131 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9132 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9133 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9134 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9135 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9136 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9137 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9138 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9139 }
9140 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9141
9142 /* add base */
9143 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9144 {
9145 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9146 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9147 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9148 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9149 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9150 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9151 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9152 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9153 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9154 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9155 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9156 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9157 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9158 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9159 /* complicated encodings */
9160 case 5:
9161 case 13:
9162 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9163 {
9164 if (!pVCpu->iem.s.uRexB)
9165 {
9166 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9167 SET_SS_DEF();
9168 }
9169 else
9170 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9171 }
9172 else
9173 {
9174 uint32_t u32Disp;
9175 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9176 u64EffAddr += (int32_t)u32Disp;
9177 uInfo |= u32Disp;
9178 }
9179 break;
9180 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9181 }
9182 break;
9183 }
9184 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9185 }
9186
9187 /* Get and add the displacement. */
9188 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9189 {
9190 case 0:
9191 break;
9192 case 1:
9193 {
9194 int8_t i8Disp;
9195 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9196 u64EffAddr += i8Disp;
9197 uInfo |= (uint32_t)(int32_t)i8Disp;
9198 break;
9199 }
9200 case 2:
9201 {
9202 uint32_t u32Disp;
9203 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9204 u64EffAddr += (int32_t)u32Disp;
9205 uInfo |= u32Disp;
9206 break;
9207 }
9208 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9209 }
9210
9211 }
9212
9213 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9214 *pGCPtrEff = u64EffAddr;
9215 else
9216 {
9217 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9218 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9219 }
9220 }
9221 *puInfo = uInfo;
9222
9223 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9224 return VINF_SUCCESS;
9225}
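/*
 * Usage sketch (kept under #if 0, not compiled): how a recompiler-side caller
 * might consume iemOpHlpCalcRmEffAddrEx and unpack the extra info value.  The
 * caller name and the assumed 1-byte immediate are illustrative only; the
 * packing of cbImmAndRspOffset and uInfo follows the doc comment above.
 */
#if 0
static VBOXSTRICTRC iemExampleUseCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm)
{
    /* First byte: size of any immediate following the ModR/M bytes (assume imm8 here);
       second byte: extra RSP displacement for POP [xSP] style decoding (none here). */
    uint32_t const cbImmAndRspOffset = UINT32_C(1) /*cbImm*/ | (UINT32_C(0) << 8) /*offRsp*/;

    RTGCPTR      GCPtrEff = 0;
    uint64_t     uInfo    = 0;
    VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, cbImmAndRspOffset, &GCPtrEff, &uInfo);
    if (rcStrict == VINF_SUCCESS)
    {
        uint32_t const u32Disp = (uint32_t)uInfo;        /* bits 31:0  - the displacement, if any */
        uint8_t  const bSib    = (uint8_t)(uInfo >> 32); /* bits 39:32 - the SIB byte, if any */
        Log5(("example: GCPtrEff=%RGv disp=%#RX32 sib=%#x\n", GCPtrEff, u32Disp, bSib));
    }
    return rcStrict;
}
#endif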
9226
9227/** @} */
9228
9229
9230#ifdef LOG_ENABLED
9231/**
9232 * Logs the current instruction.
9233 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9234 * @param fSameCtx Set if we have the same context information as the VMM,
9235 * clear if we may have already executed an instruction in
9236 * our debug context. When clear, we assume IEMCPU holds
9237 * valid CPU mode info.
9238 *
9239 * The @a fSameCtx parameter is now misleading and obsolete.
9240 * @param pszFunction The IEM function doing the execution.
9241 */
9242static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9243{
9244# ifdef IN_RING3
9245 if (LogIs2Enabled())
9246 {
9247 char szInstr[256];
9248 uint32_t cbInstr = 0;
9249 if (fSameCtx)
9250 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9251 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9252 szInstr, sizeof(szInstr), &cbInstr);
9253 else
9254 {
9255 uint32_t fFlags = 0;
9256 switch (IEM_GET_CPU_MODE(pVCpu))
9257 {
9258 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9259 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9260 case IEMMODE_16BIT:
9261 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9262 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9263 else
9264 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9265 break;
9266 }
9267 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9268 szInstr, sizeof(szInstr), &cbInstr);
9269 }
9270
9271 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9272 Log2(("**** %s fExec=%x\n"
9273 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9274 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9275 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9276 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9277 " %s\n"
9278 , pszFunction, pVCpu->iem.s.fExec,
9279 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9280 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9281 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9282 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9283 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9284 szInstr));
9285
9286 /* This stuff sucks atm. as it fills the log with MSRs. */
9287 //if (LogIs3Enabled())
9288 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9289 }
9290 else
9291# endif
9292 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9293 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9294 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9295}
9296#endif /* LOG_ENABLED */
9297
9298
9299#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9300/**
9301 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9302 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9303 *
9304 * @returns Modified rcStrict.
9305 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9306 * @param rcStrict The instruction execution status.
9307 */
9308static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9309{
9310 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9311 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9312 {
9313 /* VMX preemption timer takes priority over NMI-window exits. */
9314 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9315 {
9316 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9317 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9318 }
9319 /*
9320 * Check remaining intercepts.
9321 *
9322 * NMI-window and Interrupt-window VM-exits.
9323 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9324 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9325 *
9326 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9327 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9328 */
9329 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9330 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9331 && !TRPMHasTrap(pVCpu))
9332 {
9333 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9334 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9335 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9336 {
9337 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9338 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9339 }
9340 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9341 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9342 {
9343 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9344 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9345 }
9346 }
9347 }
9348 /* TPR-below threshold/APIC write has the highest priority. */
9349 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9350 {
9351 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9352 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9353 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9354 }
9355 /* MTF takes priority over VMX-preemption timer. */
9356 else
9357 {
9358 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9359 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9360 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9361 }
9362 return rcStrict;
9363}
9364#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9365
9366
9367/**
9368 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9369 * IEMExecOneWithPrefetchedByPC.
9370 *
9371 * Similar code is found in IEMExecLots.
9372 *
9373 * @return Strict VBox status code.
9374 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9375 * @param fExecuteInhibit If set, execute the instruction following CLI,
9376 * POP SS and MOV SS,GR.
9377 * @param pszFunction The calling function name.
9378 */
9379DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9380{
9381 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9382 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9383 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9384 RT_NOREF_PV(pszFunction);
9385
9386#ifdef IEM_WITH_SETJMP
9387 VBOXSTRICTRC rcStrict;
9388 IEM_TRY_SETJMP(pVCpu, rcStrict)
9389 {
9390 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9391 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9392 }
9393 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9394 {
9395 pVCpu->iem.s.cLongJumps++;
9396 }
9397 IEM_CATCH_LONGJMP_END(pVCpu);
9398#else
9399 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9400 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9401#endif
9402 if (rcStrict == VINF_SUCCESS)
9403 pVCpu->iem.s.cInstructions++;
9404 if (pVCpu->iem.s.cActiveMappings > 0)
9405 {
9406 Assert(rcStrict != VINF_SUCCESS);
9407 iemMemRollback(pVCpu);
9408 }
9409 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9410 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9411 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9412
9413//#ifdef DEBUG
9414// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9415//#endif
9416
9417#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9418 /*
9419 * Perform any VMX nested-guest instruction boundary actions.
9420 *
9421 * If any of these causes a VM-exit, we must skip executing the next
9422 * instruction (would run into stale page tables). A VM-exit makes sure
9423 * there is no interrupt-inhibition, so that should ensure we don't go on
9424 * to try executing the next instruction. Clearing fExecuteInhibit is
9425 * problematic because of the setjmp/longjmp clobbering above.
9426 */
9427 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9428 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9429 || rcStrict != VINF_SUCCESS)
9430 { /* likely */ }
9431 else
9432 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9433#endif
9434
9435 /* Execute the next instruction as well if a cli, pop ss or
9436 mov ss, Gr has just completed successfully. */
9437 if ( fExecuteInhibit
9438 && rcStrict == VINF_SUCCESS
9439 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9440 {
9441 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9442 if (rcStrict == VINF_SUCCESS)
9443 {
9444#ifdef LOG_ENABLED
9445 iemLogCurInstr(pVCpu, false, pszFunction);
9446#endif
9447#ifdef IEM_WITH_SETJMP
9448 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9449 {
9450 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9451 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9452 }
9453 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9454 {
9455 pVCpu->iem.s.cLongJumps++;
9456 }
9457 IEM_CATCH_LONGJMP_END(pVCpu);
9458#else
9459 IEM_OPCODE_GET_FIRST_U8(&b);
9460 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9461#endif
9462 if (rcStrict == VINF_SUCCESS)
9463 {
9464 pVCpu->iem.s.cInstructions++;
9465#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9466 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9467 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9468 { /* likely */ }
9469 else
9470 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9471#endif
9472 }
9473 if (pVCpu->iem.s.cActiveMappings > 0)
9474 {
9475 Assert(rcStrict != VINF_SUCCESS);
9476 iemMemRollback(pVCpu);
9477 }
9478 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9479 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9480 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9481 }
9482 else if (pVCpu->iem.s.cActiveMappings > 0)
9483 iemMemRollback(pVCpu);
9484 /** @todo drop this after we bake this change into RIP advancing. */
9485 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9486 }
9487
9488 /*
9489 * Return value fiddling, statistics and sanity assertions.
9490 */
9491 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9492
9493 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9494 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9495 return rcStrict;
9496}
9497
9498
9499/**
9500 * Execute one instruction.
9501 *
9502 * @return Strict VBox status code.
9503 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9504 */
9505VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9506{
9507 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9508#ifdef LOG_ENABLED
9509 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9510#endif
9511
9512 /*
9513 * Do the decoding and emulation.
9514 */
9515 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9516 if (rcStrict == VINF_SUCCESS)
9517 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9518 else if (pVCpu->iem.s.cActiveMappings > 0)
9519 iemMemRollback(pVCpu);
9520
9521 if (rcStrict != VINF_SUCCESS)
9522 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9523 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9524 return rcStrict;
9525}
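/*
 * Usage sketch (kept under #if 0, not compiled): single-stepping a guest by
 * calling IEMExecOne repeatedly from the EMT.  The loop and stop condition are
 * assumptions for illustration; the real callers live in EM and the debugger.
 */
#if 0
static VBOXSTRICTRC iemExampleSingleStep(PVMCPUCC pVCpu, uint32_t cSteps)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    while (cSteps-- > 0)
    {
        rcStrict = IEMExecOne(pVCpu);   /* decodes and executes exactly one instruction */
        if (rcStrict != VINF_SUCCESS)   /* hand any informational/error status back to the caller */
            break;
    }
    return rcStrict;
}
#endif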
9526
9527
9528VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9529{
9530 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9531 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9532 if (rcStrict == VINF_SUCCESS)
9533 {
9534 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9535 if (pcbWritten)
9536 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9537 }
9538 else if (pVCpu->iem.s.cActiveMappings > 0)
9539 iemMemRollback(pVCpu);
9540
9541 return rcStrict;
9542}
9543
9544
9545VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9546 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9547{
9548 VBOXSTRICTRC rcStrict;
9549 if ( cbOpcodeBytes
9550 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9551 {
9552 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9553#ifdef IEM_WITH_CODE_TLB
9554 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9555 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9556 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9557 pVCpu->iem.s.offCurInstrStart = 0;
9558 pVCpu->iem.s.offInstrNextByte = 0;
9559 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9560#else
9561 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9562 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9563#endif
9564 rcStrict = VINF_SUCCESS;
9565 }
9566 else
9567 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9568 if (rcStrict == VINF_SUCCESS)
9569 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9570 else if (pVCpu->iem.s.cActiveMappings > 0)
9571 iemMemRollback(pVCpu);
9572
9573 return rcStrict;
9574}
9575
9576
9577VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9578{
9579 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9580 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9581 if (rcStrict == VINF_SUCCESS)
9582 {
9583 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9584 if (pcbWritten)
9585 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9586 }
9587 else if (pVCpu->iem.s.cActiveMappings > 0)
9588 iemMemRollback(pVCpu);
9589
9590 return rcStrict;
9591}
9592
9593
9594VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9595 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9596{
9597 VBOXSTRICTRC rcStrict;
9598 if ( cbOpcodeBytes
9599 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9600 {
9601 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9602#ifdef IEM_WITH_CODE_TLB
9603 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9604 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9605 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9606 pVCpu->iem.s.offCurInstrStart = 0;
9607 pVCpu->iem.s.offInstrNextByte = 0;
9608 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9609#else
9610 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9611 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9612#endif
9613 rcStrict = VINF_SUCCESS;
9614 }
9615 else
9616 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9617 if (rcStrict == VINF_SUCCESS)
9618 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9619 else if (pVCpu->iem.s.cActiveMappings > 0)
9620 iemMemRollback(pVCpu);
9621
9622 return rcStrict;
9623}
9624
9625
9626/**
9627 * For handling split cacheline lock operations when the host has split-lock
9628 * detection enabled.
9629 *
9630 * This will cause the interpreter to disregard the lock prefix and implicit
9631 * locking (xchg).
9632 *
9633 * @returns Strict VBox status code.
9634 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9635 */
9636VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9637{
9638 /*
9639 * Do the decoding and emulation.
9640 */
9641 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9642 if (rcStrict == VINF_SUCCESS)
9643 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9644 else if (pVCpu->iem.s.cActiveMappings > 0)
9645 iemMemRollback(pVCpu);
9646
9647 if (rcStrict != VINF_SUCCESS)
9648 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9649 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9650 return rcStrict;
9651}
9652
9653
9654/**
9655 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9656 * inject a pending TRPM trap.
9657 */
9658VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9659{
9660 Assert(TRPMHasTrap(pVCpu));
9661
9662 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9663 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9664 {
9665 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9666#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9667 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9668 if (fIntrEnabled)
9669 {
9670 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9671 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9672 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9673 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9674 else
9675 {
9676 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9677 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9678 }
9679 }
9680#else
9681 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9682#endif
9683 if (fIntrEnabled)
9684 {
9685 uint8_t u8TrapNo;
9686 TRPMEVENT enmType;
9687 uint32_t uErrCode;
9688 RTGCPTR uCr2;
9689 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9690 AssertRC(rc2);
9691 Assert(enmType == TRPM_HARDWARE_INT);
9692 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9693
9694 TRPMResetTrap(pVCpu);
9695
9696#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9697 /* Injecting an event may cause a VM-exit. */
9698 if ( rcStrict != VINF_SUCCESS
9699 && rcStrict != VINF_IEM_RAISED_XCPT)
9700 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9701#else
9702 NOREF(rcStrict);
9703#endif
9704 }
9705 }
9706
9707 return VINF_SUCCESS;
9708}
9709
9710
9711VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9712{
9713 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9714 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9715 Assert(cMaxInstructions > 0);
9716
9717 /*
9718 * See if there is an interrupt pending in TRPM, inject it if we can.
9719 */
9720 /** @todo What if we are injecting an exception and not an interrupt? Is that
9721 * possible here? For now we assert it is indeed only an interrupt. */
9722 if (!TRPMHasTrap(pVCpu))
9723 { /* likely */ }
9724 else
9725 {
9726 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9727 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9728 { /*likely */ }
9729 else
9730 return rcStrict;
9731 }
9732
9733 /*
9734 * Initial decoder init w/ prefetch, then setup setjmp.
9735 */
9736 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9737 if (rcStrict == VINF_SUCCESS)
9738 {
9739#ifdef IEM_WITH_SETJMP
9740 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9741 IEM_TRY_SETJMP(pVCpu, rcStrict)
9742#endif
9743 {
9744 /*
9745 * The run loop. We limit ourselves to the caller specified instruction count (cMaxInstructions).
9746 */
9747 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9748 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9749 for (;;)
9750 {
9751 /*
9752 * Log the state.
9753 */
9754#ifdef LOG_ENABLED
9755 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9756#endif
9757
9758 /*
9759 * Do the decoding and emulation.
9760 */
9761 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9762 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9763#ifdef VBOX_STRICT
9764 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9765#endif
9766 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9767 {
9768 Assert(pVCpu->iem.s.cActiveMappings == 0);
9769 pVCpu->iem.s.cInstructions++;
9770
9771#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9772 /* Perform any VMX nested-guest instruction boundary actions. */
9773 uint64_t fCpu = pVCpu->fLocalForcedActions;
9774 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9775 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9776 { /* likely */ }
9777 else
9778 {
9779 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9780 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9781 fCpu = pVCpu->fLocalForcedActions;
9782 else
9783 {
9784 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9785 break;
9786 }
9787 }
9788#endif
9789 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9790 {
9791#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9792 uint64_t fCpu = pVCpu->fLocalForcedActions;
9793#endif
9794 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9795 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9796 | VMCPU_FF_TLB_FLUSH
9797 | VMCPU_FF_UNHALT );
9798
9799 if (RT_LIKELY( ( !fCpu
9800 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9801 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9802 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9803 {
9804 if (--cMaxInstructionsGccStupidity > 0)
9805 {
9806 /* Poll timers every now and then according to the caller's specs. */
9807 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9808 || !TMTimerPollBool(pVM, pVCpu))
9809 {
9810 Assert(pVCpu->iem.s.cActiveMappings == 0);
9811 iemReInitDecoder(pVCpu);
9812 continue;
9813 }
9814 }
9815 }
9816 }
9817 Assert(pVCpu->iem.s.cActiveMappings == 0);
9818 }
9819 else if (pVCpu->iem.s.cActiveMappings > 0)
9820 iemMemRollback(pVCpu);
9821 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9822 break;
9823 }
9824 }
9825#ifdef IEM_WITH_SETJMP
9826 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9827 {
9828 if (pVCpu->iem.s.cActiveMappings > 0)
9829 iemMemRollback(pVCpu);
9830# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9831 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9832# endif
9833 pVCpu->iem.s.cLongJumps++;
9834 }
9835 IEM_CATCH_LONGJMP_END(pVCpu);
9836#endif
9837
9838 /*
9839 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9840 */
9841 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9842 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9843 }
9844 else
9845 {
9846 if (pVCpu->iem.s.cActiveMappings > 0)
9847 iemMemRollback(pVCpu);
9848
9849#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9850 /*
9851 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9852 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9853 */
9854 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9855#endif
9856 }
9857
9858 /*
9859 * Maybe re-enter raw-mode and log.
9860 */
9861 if (rcStrict != VINF_SUCCESS)
9862 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9863 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9864 if (pcInstructions)
9865 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9866 return rcStrict;
9867}
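/*
 * Usage sketch (kept under #if 0, not compiled): batch execution via IEMExecLots.
 * Note that cPollRate is used as a mask, so it must be a power of two minus one
 * (see the assertion at the top of the function); the concrete numbers here are
 * illustrative assumptions.
 */
#if 0
static VBOXSTRICTRC iemExampleExecBatch(PVMCPUCC pVCpu)
{
    uint32_t cInstructions = 0;
    /* Execute up to 4096 instructions, polling timers roughly every 512 instructions. */
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
    LogFlow(("example: executed %u instructions, rcStrict=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif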
9868
9869
9870/**
9871 * Interface used by EMExecuteExec, does exit statistics and limits.
9872 *
9873 * @returns Strict VBox status code.
9874 * @param pVCpu The cross context virtual CPU structure.
9875 * @param fWillExit To be defined.
9876 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9877 * @param cMaxInstructions Maximum number of instructions to execute.
9878 * @param cMaxInstructionsWithoutExits
9879 * The max number of instructions without exits.
9880 * @param pStats Where to return statistics.
9881 */
9882VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9883 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9884{
9885 NOREF(fWillExit); /** @todo define flexible exit crits */
9886
9887 /*
9888 * Initialize return stats.
9889 */
9890 pStats->cInstructions = 0;
9891 pStats->cExits = 0;
9892 pStats->cMaxExitDistance = 0;
9893 pStats->cReserved = 0;
9894
9895 /*
9896 * Initial decoder init w/ prefetch, then setup setjmp.
9897 */
9898 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9899 if (rcStrict == VINF_SUCCESS)
9900 {
9901#ifdef IEM_WITH_SETJMP
9902 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9903 IEM_TRY_SETJMP(pVCpu, rcStrict)
9904#endif
9905 {
9906#ifdef IN_RING0
9907 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9908#endif
9909 uint32_t cInstructionSinceLastExit = 0;
9910
9911 /*
9912 * The run loop. We limit ourselves to the caller specified instruction count (cMaxInstructions).
9913 */
9914 PVM pVM = pVCpu->CTX_SUFF(pVM);
9915 for (;;)
9916 {
9917 /*
9918 * Log the state.
9919 */
9920#ifdef LOG_ENABLED
9921 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9922#endif
9923
9924 /*
9925 * Do the decoding and emulation.
9926 */
9927 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9928
9929 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9930 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9931
9932 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9933 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9934 {
9935 pStats->cExits += 1;
9936 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9937 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9938 cInstructionSinceLastExit = 0;
9939 }
9940
9941 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9942 {
9943 Assert(pVCpu->iem.s.cActiveMappings == 0);
9944 pVCpu->iem.s.cInstructions++;
9945 pStats->cInstructions++;
9946 cInstructionSinceLastExit++;
9947
9948#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9949 /* Perform any VMX nested-guest instruction boundary actions. */
9950 uint64_t fCpu = pVCpu->fLocalForcedActions;
9951 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9952 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9953 { /* likely */ }
9954 else
9955 {
9956 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9957 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9958 fCpu = pVCpu->fLocalForcedActions;
9959 else
9960 {
9961 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9962 break;
9963 }
9964 }
9965#endif
9966 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9967 {
9968#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9969 uint64_t fCpu = pVCpu->fLocalForcedActions;
9970#endif
9971 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9972 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9973 | VMCPU_FF_TLB_FLUSH
9974 | VMCPU_FF_UNHALT );
9975 if (RT_LIKELY( ( ( !fCpu
9976 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9977 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9978 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
9979 || pStats->cInstructions < cMinInstructions))
9980 {
9981 if (pStats->cInstructions < cMaxInstructions)
9982 {
9983 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
9984 {
9985#ifdef IN_RING0
9986 if ( !fCheckPreemptionPending
9987 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
9988#endif
9989 {
9990 Assert(pVCpu->iem.s.cActiveMappings == 0);
9991 iemReInitDecoder(pVCpu);
9992 continue;
9993 }
9994#ifdef IN_RING0
9995 rcStrict = VINF_EM_RAW_INTERRUPT;
9996 break;
9997#endif
9998 }
9999 }
10000 }
10001 Assert(!(fCpu & VMCPU_FF_IEM));
10002 }
10003 Assert(pVCpu->iem.s.cActiveMappings == 0);
10004 }
10005 else if (pVCpu->iem.s.cActiveMappings > 0)
10006 iemMemRollback(pVCpu);
10007 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10008 break;
10009 }
10010 }
10011#ifdef IEM_WITH_SETJMP
10012 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10013 {
10014 if (pVCpu->iem.s.cActiveMappings > 0)
10015 iemMemRollback(pVCpu);
10016 pVCpu->iem.s.cLongJumps++;
10017 }
10018 IEM_CATCH_LONGJMP_END(pVCpu);
10019#endif
10020
10021 /*
10022 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10023 */
10024 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10025 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10026 }
10027 else
10028 {
10029 if (pVCpu->iem.s.cActiveMappings > 0)
10030 iemMemRollback(pVCpu);
10031
10032#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10033 /*
10034 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10035 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10036 */
10037 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10038#endif
10039 }
10040
10041 /*
10042 * Maybe re-enter raw-mode and log.
10043 */
10044 if (rcStrict != VINF_SUCCESS)
10045 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10046 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10047 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10048 return rcStrict;
10049}
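/*
 * Usage sketch (kept under #if 0, not compiled): driving IEMExecForExits and
 * reading back the statistics.  The limit values are illustrative assumptions,
 * not the ones the real caller (EMExecuteExec) uses.
 */
#if 0
static VBOXSTRICTRC iemExampleExecForExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 64 /*cMinInstructions*/,
                                            4096 /*cMaxInstructions*/, 512 /*cMaxInstructionsWithoutExits*/, &Stats);
    LogFlow(("example: ins=%u exits=%u maxdist=%u rcStrict=%Rrc\n",
             Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif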
10050
10051
10052/**
10053 * Injects a trap, fault, abort, software interrupt or external interrupt.
10054 *
10055 * The parameter list matches TRPMQueryTrapAll pretty closely.
10056 *
10057 * @returns Strict VBox status code.
10058 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10059 * @param u8TrapNo The trap number.
10060 * @param enmType What type is it (trap/fault/abort), software
10061 * interrupt or hardware interrupt.
10062 * @param uErrCode The error code if applicable.
10063 * @param uCr2 The CR2 value if applicable.
10064 * @param cbInstr The instruction length (only relevant for
10065 * software interrupts).
10066 */
10067VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10068 uint8_t cbInstr)
10069{
10070 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
10071#ifdef DBGFTRACE_ENABLED
10072 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10073 u8TrapNo, enmType, uErrCode, uCr2);
10074#endif
10075
10076 uint32_t fFlags;
10077 switch (enmType)
10078 {
10079 case TRPM_HARDWARE_INT:
10080 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10081 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10082 uErrCode = uCr2 = 0;
10083 break;
10084
10085 case TRPM_SOFTWARE_INT:
10086 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10087 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10088 uErrCode = uCr2 = 0;
10089 break;
10090
10091 case TRPM_TRAP:
10092 case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
10093 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10094 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10095 if (u8TrapNo == X86_XCPT_PF)
10096 fFlags |= IEM_XCPT_FLAGS_CR2;
10097 switch (u8TrapNo)
10098 {
10099 case X86_XCPT_DF:
10100 case X86_XCPT_TS:
10101 case X86_XCPT_NP:
10102 case X86_XCPT_SS:
10103 case X86_XCPT_PF:
10104 case X86_XCPT_AC:
10105 case X86_XCPT_GP:
10106 fFlags |= IEM_XCPT_FLAGS_ERR;
10107 break;
10108 }
10109 break;
10110
10111 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10112 }
10113
10114 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10115
10116 if (pVCpu->iem.s.cActiveMappings > 0)
10117 iemMemRollback(pVCpu);
10118
10119 return rcStrict;
10120}
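/*
 * Usage sketch (kept under #if 0, not compiled): injecting a guest page fault
 * through IEMInjectTrap.  The error code bits and the fault address are made-up
 * values for illustration only.
 */
#if 0
static VBOXSTRICTRC iemExampleInjectPageFault(PVMCPUCC pVCpu)
{
    /* #PF takes an error code and the faulting linear address in CR2; cbInstr is
       only relevant for software interrupts, so it is zero here. */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
                         X86_TRAP_PF_P | X86_TRAP_PF_RW /*uErrCode*/,
                         UINT64_C(0x00007fff00001000) /*uCr2*/, 0 /*cbInstr*/);
}
#endif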
10121
10122
10123/**
10124 * Injects the active TRPM event.
10125 *
10126 * @returns Strict VBox status code.
10127 * @param pVCpu The cross context virtual CPU structure.
10128 */
10129VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10130{
10131#ifndef IEM_IMPLEMENTS_TASKSWITCH
10132 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10133#else
10134 uint8_t u8TrapNo;
10135 TRPMEVENT enmType;
10136 uint32_t uErrCode;
10137 RTGCUINTPTR uCr2;
10138 uint8_t cbInstr;
10139 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10140 if (RT_FAILURE(rc))
10141 return rc;
10142
10143 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10144 * ICEBP \#DB injection as a special case. */
10145 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10146#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10147 if (rcStrict == VINF_SVM_VMEXIT)
10148 rcStrict = VINF_SUCCESS;
10149#endif
10150#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10151 if (rcStrict == VINF_VMX_VMEXIT)
10152 rcStrict = VINF_SUCCESS;
10153#endif
10154 /** @todo Are there any other codes that imply the event was successfully
10155 * delivered to the guest? See @bugref{6607}. */
10156 if ( rcStrict == VINF_SUCCESS
10157 || rcStrict == VINF_IEM_RAISED_XCPT)
10158 TRPMResetTrap(pVCpu);
10159
10160 return rcStrict;
10161#endif
10162}
10163
10164
10165VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10166{
10167 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10168 return VERR_NOT_IMPLEMENTED;
10169}
10170
10171
10172VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10173{
10174 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10175 return VERR_NOT_IMPLEMENTED;
10176}
10177
10178
10179/**
10180 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10181 *
10182 * This API ASSUMES that the caller has already verified that the guest code is
10183 * allowed to access the I/O port. (The I/O port is in the DX register in the
10184 * guest state.)
10185 *
10186 * @returns Strict VBox status code.
10187 * @param pVCpu The cross context virtual CPU structure.
10188 * @param cbValue The size of the I/O port access (1, 2, or 4).
10189 * @param enmAddrMode The addressing mode.
10190 * @param fRepPrefix Indicates whether a repeat prefix is used
10191 * (doesn't matter which for this instruction).
10192 * @param cbInstr The instruction length in bytes.
10193 * @param iEffSeg The effective segment register (index).
10194 * @param fIoChecked Whether the access to the I/O port has been
10195 * checked or not. It's typically checked in the
10196 * HM scenario.
10197 */
10198VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10199 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10200{
10201 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10202 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10203
10204 /*
10205 * State init.
10206 */
10207 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10208
10209 /*
10210 * Switch orgy for getting to the right handler.
10211 */
10212 VBOXSTRICTRC rcStrict;
10213 if (fRepPrefix)
10214 {
10215 switch (enmAddrMode)
10216 {
10217 case IEMMODE_16BIT:
10218 switch (cbValue)
10219 {
10220 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10221 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10222 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10223 default:
10224 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10225 }
10226 break;
10227
10228 case IEMMODE_32BIT:
10229 switch (cbValue)
10230 {
10231 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10232 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10233 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10234 default:
10235 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10236 }
10237 break;
10238
10239 case IEMMODE_64BIT:
10240 switch (cbValue)
10241 {
10242 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10243 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10244 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10245 default:
10246 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10247 }
10248 break;
10249
10250 default:
10251 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10252 }
10253 }
10254 else
10255 {
10256 switch (enmAddrMode)
10257 {
10258 case IEMMODE_16BIT:
10259 switch (cbValue)
10260 {
10261 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10262 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10263 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10264 default:
10265 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10266 }
10267 break;
10268
10269 case IEMMODE_32BIT:
10270 switch (cbValue)
10271 {
10272 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10273 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10274 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10275 default:
10276 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10277 }
10278 break;
10279
10280 case IEMMODE_64BIT:
10281 switch (cbValue)
10282 {
10283 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10284 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10285 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10286 default:
10287 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10288 }
10289 break;
10290
10291 default:
10292 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10293 }
10294 }
10295
10296 if (pVCpu->iem.s.cActiveMappings)
10297 iemMemRollback(pVCpu);
10298
10299 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10300}
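/*
 * Usage sketch (kept under #if 0, not compiled): how an HM exit handler might
 * hand a "rep outsb" off to this API.  The instruction length, segment and
 * checked-port flag come from the VM-exit information in real code; the
 * constants below are illustrative assumptions.
 */
#if 0
static VBOXSTRICTRC iemExampleStringIoWrite(PVMCPUCC pVCpu)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue: byte*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                2 /*cbInstr: f3 6e*/, X86_SREG_DS /*iEffSeg*/, true /*fIoChecked*/);
}
#endif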
10301
10302
10303/**
10304 * Interface for HM and EM for executing string I/O IN (read) instructions.
10305 *
10306 * This API ASSUMES that the caller has already verified that the guest code is
10307 * allowed to access the I/O port. (The I/O port is in the DX register in the
10308 * guest state.)
10309 *
10310 * @returns Strict VBox status code.
10311 * @param pVCpu The cross context virtual CPU structure.
10312 * @param cbValue The size of the I/O port access (1, 2, or 4).
10313 * @param enmAddrMode The addressing mode.
10314 * @param fRepPrefix Indicates whether a repeat prefix is used
10315 * (doesn't matter which for this instruction).
10316 * @param cbInstr The instruction length in bytes.
10317 * @param fIoChecked Whether the access to the I/O port has been
10318 * checked or not. It's typically checked in the
10319 * HM scenario.
10320 */
10321VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10322 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10323{
10324 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10325
10326 /*
10327 * State init.
10328 */
10329 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10330
10331 /*
10332 * Switch orgy for getting to the right handler.
10333 */
10334 VBOXSTRICTRC rcStrict;
10335 if (fRepPrefix)
10336 {
10337 switch (enmAddrMode)
10338 {
10339 case IEMMODE_16BIT:
10340 switch (cbValue)
10341 {
10342 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10343 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10344 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10345 default:
10346 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10347 }
10348 break;
10349
10350 case IEMMODE_32BIT:
10351 switch (cbValue)
10352 {
10353 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10354 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10355 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10356 default:
10357 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10358 }
10359 break;
10360
10361 case IEMMODE_64BIT:
10362 switch (cbValue)
10363 {
10364 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10365 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10366 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10367 default:
10368 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10369 }
10370 break;
10371
10372 default:
10373 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10374 }
10375 }
10376 else
10377 {
10378 switch (enmAddrMode)
10379 {
10380 case IEMMODE_16BIT:
10381 switch (cbValue)
10382 {
10383 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10384 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10385 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10386 default:
10387 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10388 }
10389 break;
10390
10391 case IEMMODE_32BIT:
10392 switch (cbValue)
10393 {
10394 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10395 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10396 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10397 default:
10398 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10399 }
10400 break;
10401
10402 case IEMMODE_64BIT:
10403 switch (cbValue)
10404 {
10405 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10406 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10407 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10408 default:
10409 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10410 }
10411 break;
10412
10413 default:
10414 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10415 }
10416 }
10417
10418 if ( pVCpu->iem.s.cActiveMappings == 0
10419 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10420 { /* likely */ }
10421 else
10422 {
10423 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10424 iemMemRollback(pVCpu);
10425 }
10426 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10427}
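
/*
 * A minimal usage sketch for IEMExecStringIoRead above, assuming a VMM context
 * with the VBox VMM headers available and pVCpu referring to the calling EMT.
 * The function name and the choice of REP INSB parameters are illustrative
 * assumptions, not the actual HM/EM exit-handling code.
 */
static VBOXSTRICTRC exampleEmulateRepInsbExit(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    /* REP INSB in a 32-bit code segment: byte sized elements, 32-bit addressing,
       repeat prefix present, and the I/O port access already checked by the
       caller (fIoChecked=true), as the API assumes. */
    return IEMExecStringIoRead(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/, cbInstr, true /*fIoChecked*/);
}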
10428
10429
10430/**
10431 * Interface for rawmode to execute an OUT instruction.
10432 *
10433 * @returns Strict VBox status code.
10434 * @param pVCpu The cross context virtual CPU structure.
10435 * @param cbInstr The instruction length in bytes.
10436 * @param u16Port The port to write to.
10437 * @param fImm Whether the port is specified using an immediate operand or
10438 * using the implicit DX register.
10439 * @param cbReg The register size.
10440 *
10441 * @remarks In ring-0 not all of the state needs to be synced in.
10442 */
10443VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10444{
10445 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10446 Assert(cbReg <= 4 && cbReg != 3);
10447
10448 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10449 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10450 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10451 Assert(!pVCpu->iem.s.cActiveMappings);
10452 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10453}
10454
10455
10456/**
10457 * Interface for rawmode to execute an IN instruction.
10458 *
10459 * @returns Strict VBox status code.
10460 * @param pVCpu The cross context virtual CPU structure.
10461 * @param cbInstr The instruction length in bytes.
10462 * @param u16Port The port to read.
10463 * @param fImm Whether the port is specified using an immediate operand or
10464 * using the implicit DX register.
10465 * @param cbReg The register size.
10466 */
10467VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10468{
10469 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10470 Assert(cbReg <= 4 && cbReg != 3);
10471
10472 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10473 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10474 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10475 Assert(!pVCpu->iem.s.cActiveMappings);
10476 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10477}
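
/*
 * Minimal usage sketches for the two port I/O interfaces above, assuming a VMM
 * context with the VBox VMM headers available.  The function names, the port
 * numbers and the instruction encodings picked here are illustrative
 * assumptions only.
 */
static VBOXSTRICTRC exampleEmulateOutImm8Al(PVMCPUCC pVCpu)
{
    /* OUT 0x80, AL: 2 byte encoding (E6 ib), port given by an immediate operand, 1 byte access. */
    return IEMExecDecodedOut(pVCpu, 2 /*cbInstr*/, 0x80 /*u16Port*/, true /*fImm*/, 1 /*cbReg*/);
}

static VBOXSTRICTRC exampleEmulateInAxDx(PVMCPUCC pVCpu, uint16_t u16PortInDx)
{
    /* IN AX, DX: 1 byte encoding (ED); the port value is whatever the caller
       found in the guest DX register (u16PortInDx), 2 byte access. */
    return IEMExecDecodedIn(pVCpu, 1 /*cbInstr*/, u16PortInDx, false /*fImm*/, 2 /*cbReg*/);
}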
10478
10479
10480/**
10481 * Interface for HM and EM to write to a CRx register.
10482 *
10483 * @returns Strict VBox status code.
10484 * @param pVCpu The cross context virtual CPU structure.
10485 * @param cbInstr The instruction length in bytes.
10486 * @param iCrReg The control register number (destination).
10487 * @param iGReg The general purpose register number (source).
10488 *
10489 * @remarks In ring-0 not all of the state needs to be synced in.
10490 */
10491VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10492{
10493 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10494 Assert(iCrReg < 16);
10495 Assert(iGReg < 16);
10496
10497 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10498 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10499 Assert(!pVCpu->iem.s.cActiveMappings);
10500 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10501}
10502
10503
10504/**
10505 * Interface for HM and EM to read from a CRx register.
10506 *
10507 * @returns Strict VBox status code.
10508 * @param pVCpu The cross context virtual CPU structure.
10509 * @param cbInstr The instruction length in bytes.
10510 * @param iGReg The general purpose register number (destination).
10511 * @param iCrReg The control register number (source).
10512 *
10513 * @remarks In ring-0 not all of the state needs to be synced in.
10514 */
10515VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10516{
10517 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10518 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10519 | CPUMCTX_EXTRN_APIC_TPR);
10520 Assert(iCrReg < 16);
10521 Assert(iGReg < 16);
10522
10523 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10524 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10525 Assert(!pVCpu->iem.s.cActiveMappings);
10526 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10527}
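
/*
 * A minimal usage sketch for the CRx interfaces above, assuming a VMM context.
 * Treating the access as a MOV CR3 write is an illustrative assumption; in
 * practice the register indices come from the decoded exit information.
 */
static VBOXSTRICTRC exampleEmulateMovToCr3(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGRegSrc)
{
    /* MOV CR3, <GPR>: iCrReg is the destination control register,
       iGReg the source general purpose register. */
    return IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 3 /*iCrReg*/, iGRegSrc);
}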
10528
10529
10530/**
10531 * Interface for HM and EM to write to a DRx register.
10532 *
10533 * @returns Strict VBox status code.
10534 * @param pVCpu The cross context virtual CPU structure.
10535 * @param cbInstr The instruction length in bytes.
10536 * @param iDrReg The debug register number (destination).
10537 * @param iGReg The general purpose register number (source).
10538 *
10539 * @remarks In ring-0 not all of the state needs to be synced in.
10540 */
10541VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10542{
10543 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10544 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10545 Assert(iDrReg < 8);
10546 Assert(iGReg < 16);
10547
10548 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10549 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10550 Assert(!pVCpu->iem.s.cActiveMappings);
10551 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10552}
10553
10554
10555/**
10556 * Interface for HM and EM to read from a DRx register.
10557 *
10558 * @returns Strict VBox status code.
10559 * @param pVCpu The cross context virtual CPU structure.
10560 * @param cbInstr The instruction length in bytes.
10561 * @param iGReg The general purpose register number (destination).
10562 * @param iDrReg The debug register number (source).
10563 *
10564 * @remarks In ring-0 not all of the state needs to be synced in.
10565 */
10566VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10567{
10568 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10569 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10570 Assert(iDrReg < 8);
10571 Assert(iGReg < 16);
10572
10573 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10574 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10575 Assert(!pVCpu->iem.s.cActiveMappings);
10576 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10577}
10578
10579
10580/**
10581 * Interface for HM and EM to clear the CR0[TS] bit.
10582 *
10583 * @returns Strict VBox status code.
10584 * @param pVCpu The cross context virtual CPU structure.
10585 * @param cbInstr The instruction length in bytes.
10586 *
10587 * @remarks In ring-0 not all of the state needs to be synced in.
10588 */
10589VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10590{
10591 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10592
10593 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10594 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10595 Assert(!pVCpu->iem.s.cActiveMappings);
10596 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10597}
10598
10599
10600/**
10601 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10602 *
10603 * @returns Strict VBox status code.
10604 * @param pVCpu The cross context virtual CPU structure.
10605 * @param cbInstr The instruction length in bytes.
10606 * @param uValue The value to load into CR0.
10607 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10608 * memory operand. Otherwise pass NIL_RTGCPTR.
10609 *
10610 * @remarks In ring-0 not all of the state needs to be synced in.
10611 */
10612VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10613{
10614 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10615
10616 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10617 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10618 Assert(!pVCpu->iem.s.cActiveMappings);
10619 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10620}
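
/*
 * A minimal usage sketch for the LMSW interface above, assuming a VMM context.
 * The register-operand form has no memory operand, so NIL_RTGCPTR is passed as
 * documented; the function name is an illustrative assumption.
 */
static VBOXSTRICTRC exampleEmulateLmswFromRegister(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uNewMsw)
{
    return IEMExecDecodedLmsw(pVCpu, cbInstr, uNewMsw, NIL_RTGCPTR /* no memory operand */);
}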
10621
10622
10623/**
10624 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10625 *
10626 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10627 *
10628 * @returns Strict VBox status code.
10629 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10630 * @param cbInstr The instruction length in bytes.
10631 * @remarks In ring-0 not all of the state needs to be synced in.
10632 * @thread EMT(pVCpu)
10633 */
10634VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10635{
10636 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10637
10638 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10639 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10640 Assert(!pVCpu->iem.s.cActiveMappings);
10641 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10642}
10643
10644
10645/**
10646 * Interface for HM and EM to emulate the WBINVD instruction.
10647 *
10648 * @returns Strict VBox status code.
10649 * @param pVCpu The cross context virtual CPU structure.
10650 * @param cbInstr The instruction length in bytes.
10651 *
10652 * @remarks In ring-0 not all of the state needs to be synced in.
10653 */
10654VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10655{
10656 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10657
10658 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10659 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10660 Assert(!pVCpu->iem.s.cActiveMappings);
10661 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10662}
10663
10664
10665/**
10666 * Interface for HM and EM to emulate the INVD instruction.
10667 *
10668 * @returns Strict VBox status code.
10669 * @param pVCpu The cross context virtual CPU structure.
10670 * @param cbInstr The instruction length in bytes.
10671 *
10672 * @remarks In ring-0 not all of the state needs to be synced in.
10673 */
10674VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10675{
10676 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10677
10678 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10679 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10680 Assert(!pVCpu->iem.s.cActiveMappings);
10681 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10682}
10683
10684
10685/**
10686 * Interface for HM and EM to emulate the INVLPG instruction.
10687 *
10688 * @returns Strict VBox status code.
10689 * @retval VINF_PGM_SYNC_CR3
10690 *
10691 * @param pVCpu The cross context virtual CPU structure.
10692 * @param cbInstr The instruction length in bytes.
10693 * @param GCPtrPage The effective address of the page to invalidate.
10694 *
10695 * @remarks In ring-0 not all of the state needs to be synced in.
10696 */
10697VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10698{
10699 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10700
10701 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10702 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10703 Assert(!pVCpu->iem.s.cActiveMappings);
10704 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10705}
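
/*
 * A minimal usage sketch for the INVLPG interface above, assuming a VMM
 * context.  How VINF_PGM_SYNC_CR3 is acted upon is left to the caller; the
 * function name and the pass-through handling here are illustrative
 * assumptions.
 */
static VBOXSTRICTRC exampleEmulateInvlpgExit(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
{
    VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
    /* VINF_PGM_SYNC_CR3 (see the @retval above) is informational and simply
       propagated here so the caller can let PGM resynchronize. */
    return rcStrict;
}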
10706
10707
10708/**
10709 * Interface for HM and EM to emulate the INVPCID instruction.
10710 *
10711 * @returns Strict VBox status code.
10712 * @retval VINF_PGM_SYNC_CR3
10713 *
10714 * @param pVCpu The cross context virtual CPU structure.
10715 * @param cbInstr The instruction length in bytes.
10716 * @param iEffSeg The effective segment register.
10717 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10718 * @param uType The invalidation type.
10719 *
10720 * @remarks In ring-0 not all of the state needs to be synced in.
10721 */
10722VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10723 uint64_t uType)
10724{
10725 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10726
10727 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10728 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10729 Assert(!pVCpu->iem.s.cActiveMappings);
10730 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10731}
10732
10733
10734/**
10735 * Interface for HM and EM to emulate the CPUID instruction.
10736 *
10737 * @returns Strict VBox status code.
10738 *
10739 * @param pVCpu The cross context virtual CPU structure.
10740 * @param cbInstr The instruction length in bytes.
10741 *
10742 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
10743 */
10744VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10745{
10746 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10747 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10748
10749 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10750 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10751 Assert(!pVCpu->iem.s.cActiveMappings);
10752 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10753}
10754
10755
10756/**
10757 * Interface for HM and EM to emulate the RDPMC instruction.
10758 *
10759 * @returns Strict VBox status code.
10760 *
10761 * @param pVCpu The cross context virtual CPU structure.
10762 * @param cbInstr The instruction length in bytes.
10763 *
10764 * @remarks Not all of the state needs to be synced in.
10765 */
10766VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10767{
10768 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10769 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10770
10771 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10772 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10773 Assert(!pVCpu->iem.s.cActiveMappings);
10774 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10775}
10776
10777
10778/**
10779 * Interface for HM and EM to emulate the RDTSC instruction.
10780 *
10781 * @returns Strict VBox status code.
10782 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10783 *
10784 * @param pVCpu The cross context virtual CPU structure.
10785 * @param cbInstr The instruction length in bytes.
10786 *
10787 * @remarks Not all of the state needs to be synced in.
10788 */
10789VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10790{
10791 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10792 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10793
10794 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10795 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10796 Assert(!pVCpu->iem.s.cActiveMappings);
10797 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10798}
10799
10800
10801/**
10802 * Interface for HM and EM to emulate the RDTSCP instruction.
10803 *
10804 * @returns Strict VBox status code.
10805 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10806 *
10807 * @param pVCpu The cross context virtual CPU structure.
10808 * @param cbInstr The instruction length in bytes.
10809 *
10810 * @remarks Not all of the state needs to be synced in. It is recommended
10811 *          to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10812 */
10813VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10814{
10815 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10816 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10817
10818 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10819 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10820 Assert(!pVCpu->iem.s.cActiveMappings);
10821 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10822}
10823
10824
10825/**
10826 * Interface for HM and EM to emulate the RDMSR instruction.
10827 *
10828 * @returns Strict VBox status code.
10829 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10830 *
10831 * @param pVCpu The cross context virtual CPU structure.
10832 * @param cbInstr The instruction length in bytes.
10833 *
10834 * @remarks Not all of the state needs to be synced in. Requires RCX and
10835 * (currently) all MSRs.
10836 */
10837VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10838{
10839 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10840 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10841
10842 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10843 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10844 Assert(!pVCpu->iem.s.cActiveMappings);
10845 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10846}
10847
10848
10849/**
10850 * Interface for HM and EM to emulate the WRMSR instruction.
10851 *
10852 * @returns Strict VBox status code.
10853 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10854 *
10855 * @param pVCpu The cross context virtual CPU structure.
10856 * @param cbInstr The instruction length in bytes.
10857 *
10858 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10859 * and (currently) all MSRs.
10860 */
10861VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10862{
10863 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10864 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10865 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10866
10867 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10868 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10869 Assert(!pVCpu->iem.s.cActiveMappings);
10870 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10871}
10872
10873
10874/**
10875 * Interface for HM and EM to emulate the MONITOR instruction.
10876 *
10877 * @returns Strict VBox status code.
10878 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10879 *
10880 * @param pVCpu The cross context virtual CPU structure.
10881 * @param cbInstr The instruction length in bytes.
10882 *
10883 * @remarks Not all of the state needs to be synced in.
10884 * @remarks ASSUMES the default segment of DS and no segment override prefixes
10885 * are used.
10886 */
10887VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10888{
10889 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10890 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10891
10892 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10893 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10894 Assert(!pVCpu->iem.s.cActiveMappings);
10895 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10896}
10897
10898
10899/**
10900 * Interface for HM and EM to emulate the MWAIT instruction.
10901 *
10902 * @returns Strict VBox status code.
10903 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10904 *
10905 * @param pVCpu The cross context virtual CPU structure.
10906 * @param cbInstr The instruction length in bytes.
10907 *
10908 * @remarks Not all of the state needs to be synced in.
10909 */
10910VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10911{
10912 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10913 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10914
10915 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10916 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10917 Assert(!pVCpu->iem.s.cActiveMappings);
10918 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10919}
10920
10921
10922/**
10923 * Interface for HM and EM to emulate the HLT instruction.
10924 *
10925 * @returns Strict VBox status code.
10926 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10927 *
10928 * @param pVCpu The cross context virtual CPU structure.
10929 * @param cbInstr The instruction length in bytes.
10930 *
10931 * @remarks Not all of the state needs to be synced in.
10932 */
10933VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10934{
10935 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10936
10937 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10938 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10939 Assert(!pVCpu->iem.s.cActiveMappings);
10940 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10941}
10942
10943
10944/**
10945 * Checks if IEM is in the process of delivering an event (interrupt or
10946 * exception).
10947 *
10948 * @returns true if we're in the process of raising an interrupt or exception,
10949 * false otherwise.
10950 * @param pVCpu The cross context virtual CPU structure.
10951 * @param puVector Where to store the vector associated with the
10952 * currently delivered event, optional.
10953 * @param pfFlags Where to store the event delivery flags (see
10954 * IEM_XCPT_FLAGS_XXX), optional.
10955 * @param puErr Where to store the error code associated with the
10956 * event, optional.
10957 * @param puCr2 Where to store the CR2 associated with the event,
10958 * optional.
10959 * @remarks The caller should check the flags to determine if the error code and
10960 * CR2 are valid for the event.
10961 */
10962VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10963{
10964 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10965 if (fRaisingXcpt)
10966 {
10967 if (puVector)
10968 *puVector = pVCpu->iem.s.uCurXcpt;
10969 if (pfFlags)
10970 *pfFlags = pVCpu->iem.s.fCurXcpt;
10971 if (puErr)
10972 *puErr = pVCpu->iem.s.uCurXcptErr;
10973 if (puCr2)
10974 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10975 }
10976 return fRaisingXcpt;
10977}
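
/*
 * A minimal usage sketch for IEMGetCurrentXcpt, assuming a VMM context.  The
 * IEM_XCPT_FLAGS_ERR and IEM_XCPT_FLAGS_CR2 bits used here are assumed to be
 * the relevant members of the IEM_XCPT_FLAGS_XXX set mentioned above.
 */
static void exampleLogPendingEvent(PVMCPUCC pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        /* Only trust the error code and CR2 when the flags say they are valid. */
        Log(("Pending event: vector=%#x fFlags=%#x uErr=%#x (valid=%RTbool) uCr2=%RX64 (valid=%RTbool)\n",
             uVector, fFlags, uErr, RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR), uCr2, RT_BOOL(fFlags & IEM_XCPT_FLAGS_CR2)));
    }
}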
10978
10979#ifdef IN_RING3
10980
10981/**
10982 * Handles the unlikely and probably fatal merge cases.
10983 *
10984 * @returns Merged status code.
10985 * @param rcStrict Current EM status code.
10986 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10987 * with @a rcStrict.
10988 * @param iMemMap The memory mapping index. For error reporting only.
10989 * @param pVCpu The cross context virtual CPU structure of the calling
10990 * thread, for error reporting only.
10991 */
10992DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
10993 unsigned iMemMap, PVMCPUCC pVCpu)
10994{
10995 if (RT_FAILURE_NP(rcStrict))
10996 return rcStrict;
10997
10998 if (RT_FAILURE_NP(rcStrictCommit))
10999 return rcStrictCommit;
11000
11001 if (rcStrict == rcStrictCommit)
11002 return rcStrictCommit;
11003
11004 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11005 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11006 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11007 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11008 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11009 return VERR_IOM_FF_STATUS_IPE;
11010}
11011
11012
11013/**
11014 * Helper for IEMR3ProcessForceFlag.
11015 *
11016 * @returns Merged status code.
11017 * @param rcStrict Current EM status code.
11018 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11019 * with @a rcStrict.
11020 * @param iMemMap The memory mapping index. For error reporting only.
11021 * @param pVCpu The cross context virtual CPU structure of the calling
11022 * thread, for error reporting only.
11023 */
11024DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11025{
11026 /* Simple. */
11027 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11028 return rcStrictCommit;
11029
11030 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11031 return rcStrict;
11032
11033 /* EM scheduling status codes. */
11034 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11035 && rcStrict <= VINF_EM_LAST))
11036 {
11037 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11038 && rcStrictCommit <= VINF_EM_LAST))
11039 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11040 }
11041
11042 /* Unlikely */
11043 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11044}
11045
11046
11047/**
11048 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11049 *
11050 * @returns Merge between @a rcStrict and what the commit operation returned.
11051 * @param pVM The cross context VM structure.
11052 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11053 * @param rcStrict The status code returned by ring-0 or raw-mode.
11054 */
11055VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11056{
11057 /*
11058 * Reset the pending commit.
11059 */
11060 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11061 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11062 ("%#x %#x %#x\n",
11063 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11064 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11065
11066 /*
11067 * Commit the pending bounce buffers (usually just one).
11068 */
11069 unsigned cBufs = 0;
11070 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11071 while (iMemMap-- > 0)
11072 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11073 {
11074 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11075 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11076 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11077
11078 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11079 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11080 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11081
11082 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11083 {
11084 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11085 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11086 pbBuf,
11087 cbFirst,
11088 PGMACCESSORIGIN_IEM);
11089 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11090 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11091 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11092 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11093 }
11094
11095 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11096 {
11097 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11098 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11099 pbBuf + cbFirst,
11100 cbSecond,
11101 PGMACCESSORIGIN_IEM);
11102 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11103 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11104 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11105 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11106 }
11107 cBufs++;
11108 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11109 }
11110
11111 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11112 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11113 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11114 pVCpu->iem.s.cActiveMappings = 0;
11115 return rcStrict;
11116}
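
/*
 * A minimal caller-side sketch for IEMR3ProcessForceFlag, assuming a ring-3
 * VMM context.  The helper name and its placement are illustrative
 * assumptions; the real caller is the force-flag handling code mentioned in
 * the doc comment above.
 */
static VBOXSTRICTRC exampleProcessIemForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    /* Only invoke the commit path when IEM actually flagged pending bounce buffer writes. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}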
11117
11118#endif /* IN_RING3 */
11119