VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@100626

Last change on this file since 100626 was 100626, checked in by vboxsync, 20 months ago

VMM/IEM: Logging. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 471.6 KB
 
1/* $Id: IEMAll.cpp 100626 2023-07-18 10:12:35Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
 10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
 31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered, however this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This however, only
 58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) : Memory writes.
82 * - Level 9 (Log9) : Memory reads.
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
87 * - Level 1 (Log) : Errors and other major events.
88 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
89 * - Level 2 (Log2) : VM exits.
90 */
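
/*
 * Illustrative example (not from the original file): how the level assignments
 * above show up in practice.  The Log10 statement is taken from
 * IEMTlbInvalidatePage() further down; the Log8 statement is an invented
 * sketch of a memory-write log, so its format string and variable names are
 * assumptions.
 * @code
 *   Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));    // level 10: TLBs
 *   Log8(("IEM WR %RGv LB %#x\n", GCPtrMem, cbMem));         // level 8: memory writes (hypothetical)
 * @endcode
 */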
91
92/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
93#ifdef _MSC_VER
94# pragma warning(disable:4505)
95#endif
96
97
98/*********************************************************************************************************************************
99* Header Files *
100*********************************************************************************************************************************/
101#define LOG_GROUP LOG_GROUP_IEM
102#define VMCPU_INCL_CPUM_GST_CTX
103#include <VBox/vmm/iem.h>
104#include <VBox/vmm/cpum.h>
105#include <VBox/vmm/apic.h>
106#include <VBox/vmm/pdm.h>
107#include <VBox/vmm/pgm.h>
108#include <VBox/vmm/iom.h>
109#include <VBox/vmm/em.h>
110#include <VBox/vmm/hm.h>
111#include <VBox/vmm/nem.h>
112#include <VBox/vmm/gim.h>
113#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
114# include <VBox/vmm/em.h>
115# include <VBox/vmm/hm_svm.h>
116#endif
117#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
118# include <VBox/vmm/hmvmxinline.h>
119#endif
120#include <VBox/vmm/tm.h>
121#include <VBox/vmm/dbgf.h>
122#include <VBox/vmm/dbgftrace.h>
123#include "IEMInternal.h"
124#include <VBox/vmm/vmcc.h>
125#include <VBox/log.h>
126#include <VBox/err.h>
127#include <VBox/param.h>
128#include <VBox/dis.h>
129#include <iprt/asm-math.h>
130#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
131# include <iprt/asm-amd64-x86.h>
132#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
133# include <iprt/asm-arm.h>
134#endif
135#include <iprt/assert.h>
136#include <iprt/string.h>
137#include <iprt/x86.h>
138
139#include "IEMInline.h"
140
141
142/*********************************************************************************************************************************
143* Structures and Typedefs *
144*********************************************************************************************************************************/
145/**
146 * CPU exception classes.
147 */
148typedef enum IEMXCPTCLASS
149{
150 IEMXCPTCLASS_BENIGN,
151 IEMXCPTCLASS_CONTRIBUTORY,
152 IEMXCPTCLASS_PAGE_FAULT,
153 IEMXCPTCLASS_DOUBLE_FAULT
154} IEMXCPTCLASS;
155
156
157/*********************************************************************************************************************************
158* Global Variables *
159*********************************************************************************************************************************/
160#if defined(IEM_LOG_MEMORY_WRITES)
161/** What IEM just wrote. */
162uint8_t g_abIemWrote[256];
163/** How much IEM just wrote. */
164size_t g_cbIemWrote;
165#endif
166
167
168/*********************************************************************************************************************************
169* Internal Functions *
170*********************************************************************************************************************************/
171static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
172 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
173
174
175/**
176 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
177 * path.
178 *
179 * @returns IEM_F_BRK_PENDING_XXX or zero.
180 * @param pVCpu The cross context virtual CPU structure of the
181 * calling thread.
182 *
183 * @note Don't call directly, use iemCalcExecDbgFlags instead.
184 */
185uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
186{
187 uint32_t fExec = 0;
188
189 /*
190 * Process guest breakpoints.
191 */
192#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
193 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
194 { \
195 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
196 { \
197 case X86_DR7_RW_EO: \
198 fExec |= IEM_F_PENDING_BRK_INSTR; \
199 break; \
200 case X86_DR7_RW_WO: \
201 case X86_DR7_RW_RW: \
202 fExec |= IEM_F_PENDING_BRK_DATA; \
203 break; \
204 case X86_DR7_RW_IO: \
205 fExec |= IEM_F_PENDING_BRK_X86_IO; \
206 break; \
207 } \
208 } \
209 } while (0)
210
211 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
212 if (fGstDr7 & X86_DR7_ENABLED_MASK)
213 {
214 PROCESS_ONE_BP(fGstDr7, 0);
215 PROCESS_ONE_BP(fGstDr7, 1);
216 PROCESS_ONE_BP(fGstDr7, 2);
217 PROCESS_ONE_BP(fGstDr7, 3);
218 }
219
220 /*
221 * Process hypervisor breakpoints.
222 */
223 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
224 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
225 {
226 PROCESS_ONE_BP(fHyperDr7, 0);
227 PROCESS_ONE_BP(fHyperDr7, 1);
228 PROCESS_ONE_BP(fHyperDr7, 2);
229 PROCESS_ONE_BP(fHyperDr7, 3);
230 }
231
232 return fExec;
233}
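
/*
 * Illustrative example (not part of the build): a standalone version of what
 * the PROCESS_ONE_BP macro above does with the architectural DR7 layout.  The
 * EXAMPLE_BRK_* flag values are made up for the sketch; only the bit positions
 * follow the x86 definition (L/G enables in bits 0-7, R/W fields at bits 16+4n).
 * @code
 *   #include <stdint.h>
 *   #include <stdio.h>
 *
 *   enum { EXAMPLE_BRK_INSTR = 1, EXAMPLE_BRK_DATA = 2, EXAMPLE_BRK_IO = 4 };
 *
 *   static unsigned ExampleDecodeDr7(uint32_t fDr7)
 *   {
 *       unsigned fPending = 0;
 *       for (unsigned iBp = 0; iBp < 4; iBp++)
 *       {
 *           if (fDr7 & (3u << (iBp * 2)))              // L<n> or G<n> set?
 *           {
 *               switch ((fDr7 >> (16 + iBp * 4)) & 3u) // R/W<n> field
 *               {
 *                   case 0: fPending |= EXAMPLE_BRK_INSTR; break; // execution
 *                   case 1:                                       // writes
 *                   case 3: fPending |= EXAMPLE_BRK_DATA;  break; // reads + writes
 *                   case 2: fPending |= EXAMPLE_BRK_IO;    break; // I/O (needs CR4.DE)
 *               }
 *           }
 *       }
 *       return fPending;
 *   }
 *
 *   int main(void)
 *   {
 *       printf("%#x\n", ExampleDecodeDr7(UINT32_C(0x00010002))); // G0 + RW0=01 -> 0x2
 *       return 0;
 *   }
 * @endcode
 */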
234
235
236/**
237 * Initializes the decoder state.
238 *
239 * iemReInitDecoder is mostly a copy of this function.
240 *
241 * @param pVCpu The cross context virtual CPU structure of the
242 * calling thread.
243 * @param fExecOpts Optional execution flags:
244 * - IEM_F_BYPASS_HANDLERS
245 * - IEM_F_X86_DISREGARD_LOCK
246 */
247DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
248{
249 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
250 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
251 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
252 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
253 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
254 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
255 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
256 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
257 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
258 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
259
260 /* Execution state: */
261 uint32_t fExec;
262 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
263
264 /* Decoder state: */
265 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
266 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
267 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
268 {
269 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
270 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
271 }
272 else
273 {
274 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
275 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
276 }
277 pVCpu->iem.s.fPrefixes = 0;
278 pVCpu->iem.s.uRexReg = 0;
279 pVCpu->iem.s.uRexB = 0;
280 pVCpu->iem.s.uRexIndex = 0;
281 pVCpu->iem.s.idxPrefix = 0;
282 pVCpu->iem.s.uVex3rdReg = 0;
283 pVCpu->iem.s.uVexLength = 0;
284 pVCpu->iem.s.fEvexStuff = 0;
285 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
286#ifdef IEM_WITH_CODE_TLB
287 pVCpu->iem.s.pbInstrBuf = NULL;
288 pVCpu->iem.s.offInstrNextByte = 0;
289 pVCpu->iem.s.offCurInstrStart = 0;
290# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
291 pVCpu->iem.s.offOpcode = 0;
292# endif
293# ifdef VBOX_STRICT
294 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
295 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
296 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
297 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
298# endif
299#else
300 pVCpu->iem.s.offOpcode = 0;
301 pVCpu->iem.s.cbOpcode = 0;
302#endif
303 pVCpu->iem.s.offModRm = 0;
304 pVCpu->iem.s.cActiveMappings = 0;
305 pVCpu->iem.s.iNextMapping = 0;
306 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
307
308#ifdef DBGFTRACE_ENABLED
309 switch (IEM_GET_CPU_MODE(pVCpu))
310 {
311 case IEMMODE_64BIT:
312 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
313 break;
314 case IEMMODE_32BIT:
315 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
316 break;
317 case IEMMODE_16BIT:
318 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
319 break;
320 }
321#endif
322}
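
/*
 * Illustrative example (not part of the build): the default size selection
 * performed by iemInitDecoder() above.  16-bit and 32-bit code use the CPU
 * mode for both the address-size and operand-size defaults, while 64-bit code
 * keeps a 64-bit address size but defaults to 32-bit operands (REX.W or an
 * operand-size prefix overrides that during decoding).  The EXMODE enum is an
 * assumption for the sketch, not the real IEMMODE type.
 * @code
 *   typedef enum { EXMODE_16BIT, EXMODE_32BIT, EXMODE_64BIT } EXMODE;
 *
 *   static void ExampleDefaultSizes(EXMODE enmCpuMode, EXMODE *penmAddrSize, EXMODE *penmOpSize)
 *   {
 *       *penmAddrSize = enmCpuMode;                                             // address size tracks the mode
 *       *penmOpSize   = enmCpuMode != EXMODE_64BIT ? enmCpuMode : EXMODE_32BIT; // 64-bit mode: 32-bit default operands
 *   }
 * @endcode
 */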
323
324
325/**
326 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
327 *
328 * This is mostly a copy of iemInitDecoder.
329 *
330 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
331 */
332DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
333{
334 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
335 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
336 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
337 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
338 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
339 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
340 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
341 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
342 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
343
344 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
345 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
346 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
347
348 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
349 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
350 pVCpu->iem.s.enmEffAddrMode = enmMode;
351 if (enmMode != IEMMODE_64BIT)
352 {
353 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
354 pVCpu->iem.s.enmEffOpSize = enmMode;
355 }
356 else
357 {
358 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
359 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
360 }
361 pVCpu->iem.s.fPrefixes = 0;
362 pVCpu->iem.s.uRexReg = 0;
363 pVCpu->iem.s.uRexB = 0;
364 pVCpu->iem.s.uRexIndex = 0;
365 pVCpu->iem.s.idxPrefix = 0;
366 pVCpu->iem.s.uVex3rdReg = 0;
367 pVCpu->iem.s.uVexLength = 0;
368 pVCpu->iem.s.fEvexStuff = 0;
369 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
370#ifdef IEM_WITH_CODE_TLB
371 if (pVCpu->iem.s.pbInstrBuf)
372 {
373 uint64_t off = (enmMode == IEMMODE_64BIT
374 ? pVCpu->cpum.GstCtx.rip
375 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
376 - pVCpu->iem.s.uInstrBufPc;
377 if (off < pVCpu->iem.s.cbInstrBufTotal)
378 {
379 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
380 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
381 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
382 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
383 else
384 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
385 }
386 else
387 {
388 pVCpu->iem.s.pbInstrBuf = NULL;
389 pVCpu->iem.s.offInstrNextByte = 0;
390 pVCpu->iem.s.offCurInstrStart = 0;
391 pVCpu->iem.s.cbInstrBuf = 0;
392 pVCpu->iem.s.cbInstrBufTotal = 0;
393 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
394 }
395 }
396 else
397 {
398 pVCpu->iem.s.offInstrNextByte = 0;
399 pVCpu->iem.s.offCurInstrStart = 0;
400 pVCpu->iem.s.cbInstrBuf = 0;
401 pVCpu->iem.s.cbInstrBufTotal = 0;
402# ifdef VBOX_STRICT
403 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
404# endif
405 }
406# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
407 pVCpu->iem.s.offOpcode = 0;
408# endif
409#else /* !IEM_WITH_CODE_TLB */
410 pVCpu->iem.s.cbOpcode = 0;
411 pVCpu->iem.s.offOpcode = 0;
412#endif /* !IEM_WITH_CODE_TLB */
413 pVCpu->iem.s.offModRm = 0;
414 Assert(pVCpu->iem.s.cActiveMappings == 0);
415 pVCpu->iem.s.iNextMapping = 0;
416 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
417 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
418
419#ifdef DBGFTRACE_ENABLED
420 switch (enmMode)
421 {
422 case IEMMODE_64BIT:
423 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
424 break;
425 case IEMMODE_32BIT:
426 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
427 break;
428 case IEMMODE_16BIT:
429 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
430 break;
431 }
432#endif
433}
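
/*
 * Illustrative example (not part of the build): the buffer reuse check in
 * iemReInitDecoder() above.  The code TLB instruction buffer is keyed by the
 * linear address it was fetched from (uInstrBufPc); if the new PC still falls
 * inside the buffer, only the offsets are updated, otherwise the buffer is
 * dropped and refetched on the next opcode access.
 * @code
 *   #include <stdbool.h>
 *   #include <stdint.h>
 *
 *   static bool ExampleCanReuseInstrBuf(uint64_t uNewPc, uint64_t uBufPc, uint32_t cbBufTotal)
 *   {
 *       // The unsigned subtraction also rejects uNewPc < uBufPc (it wraps to a huge value).
 *       return uNewPc - uBufPc < cbBufTotal;
 *   }
 * @endcode
 */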
434
435
436
437/**
 438 * Prefetches opcodes the first time round, when starting to execute.
439 *
440 * @returns Strict VBox status code.
441 * @param pVCpu The cross context virtual CPU structure of the
442 * calling thread.
443 * @param fExecOpts Optional execution flags:
444 * - IEM_F_BYPASS_HANDLERS
445 * - IEM_F_X86_DISREGARD_LOCK
446 */
447static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
448{
449 iemInitDecoder(pVCpu, fExecOpts);
450
451#ifndef IEM_WITH_CODE_TLB
452 /*
453 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
454 *
455 * First translate CS:rIP to a physical address.
456 *
457 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
458 * all relevant bytes from the first page, as it ASSUMES it's only ever
459 * called for dealing with CS.LIM, page crossing and instructions that
460 * are too long.
461 */
462 uint32_t cbToTryRead;
463 RTGCPTR GCPtrPC;
464 if (IEM_IS_64BIT_CODE(pVCpu))
465 {
466 cbToTryRead = GUEST_PAGE_SIZE;
467 GCPtrPC = pVCpu->cpum.GstCtx.rip;
468 if (IEM_IS_CANONICAL(GCPtrPC))
469 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
470 else
471 return iemRaiseGeneralProtectionFault0(pVCpu);
472 }
473 else
474 {
475 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
476 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
477 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
478 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
479 else
480 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
481 if (cbToTryRead) { /* likely */ }
482 else /* overflowed */
483 {
484 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
485 cbToTryRead = UINT32_MAX;
486 }
487 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
488 Assert(GCPtrPC <= UINT32_MAX);
489 }
490
491 PGMPTWALK Walk;
492 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
493 if (RT_SUCCESS(rc))
494 Assert(Walk.fSucceeded); /* probable. */
495 else
496 {
497 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
498# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
499 if (Walk.fFailed & PGM_WALKFAIL_EPT)
500 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
501# endif
502 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
503 }
504 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
505 else
506 {
507 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
508# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
509 if (Walk.fFailed & PGM_WALKFAIL_EPT)
510 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
511# endif
512 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
513 }
514 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
515 else
516 {
517 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
518# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
519 if (Walk.fFailed & PGM_WALKFAIL_EPT)
520 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
521# endif
522 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
523 }
524 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
525 /** @todo Check reserved bits and such stuff. PGM is better at doing
526 * that, so do it when implementing the guest virtual address
527 * TLB... */
528
529 /*
530 * Read the bytes at this address.
531 */
532 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
533 if (cbToTryRead > cbLeftOnPage)
534 cbToTryRead = cbLeftOnPage;
535 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
536 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
537
538 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
539 {
540 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
541 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
542 { /* likely */ }
543 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
544 {
545 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
546 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
547 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
548 }
549 else
550 {
551 Log((RT_SUCCESS(rcStrict)
552 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
553 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
554 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
555 return rcStrict;
556 }
557 }
558 else
559 {
560 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
561 if (RT_SUCCESS(rc))
562 { /* likely */ }
563 else
564 {
565 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
566 GCPtrPC, GCPhys, rc, cbToTryRead));
567 return rc;
568 }
569 }
570 pVCpu->iem.s.cbOpcode = cbToTryRead;
571#endif /* !IEM_WITH_CODE_TLB */
572 return VINF_SUCCESS;
573}
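
/*
 * Illustrative example (not part of the build): the prefetch size clamping
 * done above for cbToTryRead.  The read never crosses the guest page holding
 * CS:rIP; a 4 KiB page size is assumed for the sketch.
 * @code
 *   #include <stdint.h>
 *
 *   #define EX_PAGE_SIZE        0x1000u
 *   #define EX_PAGE_OFFSET_MASK 0xfffu
 *
 *   static uint32_t ExampleClampToPage(uint64_t GCPtrPC, uint32_t cbWanted)
 *   {
 *       uint32_t const cbLeftOnPage = EX_PAGE_SIZE - (uint32_t)(GCPtrPC & EX_PAGE_OFFSET_MASK);
 *       return cbWanted < cbLeftOnPage ? cbWanted : cbLeftOnPage;
 *   }
 *   // ExampleClampToPage(0x1ffd, 16) -> 3: only three bytes left before the page boundary.
 * @endcode
 */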
574
575
576/**
577 * Invalidates the IEM TLBs.
578 *
579 * This is called internally as well as by PGM when moving GC mappings.
580 *
581 * @param pVCpu The cross context virtual CPU structure of the calling
582 * thread.
583 */
584VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
585{
586#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
587 Log10(("IEMTlbInvalidateAll\n"));
588# ifdef IEM_WITH_CODE_TLB
589 pVCpu->iem.s.cbInstrBufTotal = 0;
590 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
591 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
592 { /* very likely */ }
593 else
594 {
595 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
596 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
597 while (i-- > 0)
598 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
599 }
600# endif
601
602# ifdef IEM_WITH_DATA_TLB
603 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
604 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
605 { /* very likely */ }
606 else
607 {
608 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
609 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
610 while (i-- > 0)
611 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
612 }
613# endif
614#else
615 RT_NOREF(pVCpu);
616#endif
617}
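
/*
 * Illustrative example (not part of the build): the revision trick used by
 * IEMTlbInvalidateAll() above.  Every entry tag is stored together with the
 * revision it was inserted under, so bumping the revision invalidates the
 * whole table in O(1); the tags only need to be scrubbed when the revision
 * counter wraps back to zero.  The layout below is an assumption for the
 * sketch, not the real IEMTLB structure.
 * @code
 *   #include <stdint.h>
 *   #include <string.h>
 *
 *   #define EX_TLB_ENTRIES  256
 *   #define EX_REV_INCR     UINT64_C(0x100)   // revision occupies bits above the page-number tag
 *
 *   typedef struct { uint64_t uTag; } EXTLBENTRY;
 *   typedef struct { uint64_t uRevision; EXTLBENTRY aEntries[EX_TLB_ENTRIES]; } EXTLB;
 *
 *   static void ExampleTlbInvalidateAll(EXTLB *pTlb)
 *   {
 *       pTlb->uRevision += EX_REV_INCR;
 *       if (pTlb->uRevision == 0)             // wrapped: stale tags could match again
 *       {
 *           pTlb->uRevision = EX_REV_INCR;
 *           memset(pTlb->aEntries, 0, sizeof(pTlb->aEntries));
 *       }
 *   }
 * @endcode
 */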
618
619
620/**
621 * Invalidates a page in the TLBs.
622 *
623 * @param pVCpu The cross context virtual CPU structure of the calling
624 * thread.
625 * @param GCPtr The address of the page to invalidate
626 * @thread EMT(pVCpu)
627 */
628VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
629{
630#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
631 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
632 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
633 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
634 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
635
636# ifdef IEM_WITH_CODE_TLB
637 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
638 {
639 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
640 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
641 pVCpu->iem.s.cbInstrBufTotal = 0;
642 }
643# endif
644
645# ifdef IEM_WITH_DATA_TLB
646 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
647 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
648# endif
649#else
650 NOREF(pVCpu); NOREF(GCPtr);
651#endif
652}
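
/*
 * Illustrative example (not part of the build): how a linear address turns
 * into a tag and a table index for the direct-mapped lookup done by
 * IEMTlbInvalidatePage() above.  A 4 KiB page and a 256-entry table are
 * assumed for the sketch.
 * @code
 *   #include <stdint.h>
 *
 *   #define EX_PAGE_SHIFT   12
 *   #define EX_TLB_ENTRIES  256u
 *
 *   static uint64_t ExampleTagNoRev(uint64_t GCPtr)  { return GCPtr >> EX_PAGE_SHIFT; }
 *   static unsigned ExampleTagToIndex(uint64_t uTag) { return (unsigned)(uTag % EX_TLB_ENTRIES); }
 *
 *   // A lookup only hits when both the page number and the current revision match:
 *   //   fHit = aEntries[ExampleTagToIndex(uTagNoRev)].uTag == (uTagNoRev | uRevision);
 * @endcode
 */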
653
654
655#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
656/**
 657 * Invalidates both TLBs the slow way following a rollover.
658 *
659 * Worker for IEMTlbInvalidateAllPhysical,
660 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
661 * iemMemMapJmp and others.
662 *
663 * @thread EMT(pVCpu)
664 */
665static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
666{
667 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
668 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
669 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
670
671 unsigned i;
672# ifdef IEM_WITH_CODE_TLB
673 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
674 while (i-- > 0)
675 {
676 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
677 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
678 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
679 }
680# endif
681# ifdef IEM_WITH_DATA_TLB
682 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
683 while (i-- > 0)
684 {
685 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
686 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
687 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
688 }
689# endif
690
691}
692#endif
693
694
695/**
696 * Invalidates the host physical aspects of the IEM TLBs.
697 *
698 * This is called internally as well as by PGM when moving GC mappings.
699 *
700 * @param pVCpu The cross context virtual CPU structure of the calling
701 * thread.
702 * @note Currently not used.
703 */
704VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
705{
706#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
 707 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
708 Log10(("IEMTlbInvalidateAllPhysical\n"));
709
710# ifdef IEM_WITH_CODE_TLB
711 pVCpu->iem.s.cbInstrBufTotal = 0;
712# endif
713 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
714 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
715 {
716 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
717 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
718 }
719 else
720 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
721#else
722 NOREF(pVCpu);
723#endif
724}
725
726
727/**
728 * Invalidates the host physical aspects of the IEM TLBs.
729 *
730 * This is called internally as well as by PGM when moving GC mappings.
731 *
732 * @param pVM The cross context VM structure.
733 * @param idCpuCaller The ID of the calling EMT if available to the caller,
734 * otherwise NIL_VMCPUID.
735 *
736 * @remarks Caller holds the PGM lock.
737 */
738VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
739{
740#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
741 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
742 if (pVCpuCaller)
743 VMCPU_ASSERT_EMT(pVCpuCaller);
744 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
745
746 VMCC_FOR_EACH_VMCPU(pVM)
747 {
748# ifdef IEM_WITH_CODE_TLB
749 if (pVCpuCaller == pVCpu)
750 pVCpu->iem.s.cbInstrBufTotal = 0;
751# endif
752
753 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
754 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
755 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
756 { /* likely */}
757 else if (pVCpuCaller == pVCpu)
758 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
759 else
760 {
761 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
762 continue;
763 }
764 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
765 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
766 }
767 VMCC_FOR_EACH_VMCPU_END(pVM);
768
769#else
770 RT_NOREF(pVM, idCpuCaller);
771#endif
772}
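
/*
 * Illustrative example (not part of the build): why the cross-CPU loop above
 * publishes the new physical revision with a compare-exchange rather than a
 * plain store.  If the target EMT already bumped (or reset) its own revision
 * in the meantime, the CAS simply fails and that newer value is kept.  C11
 * atomics stand in for the ASMAtomic* wrappers here.
 * @code
 *   #include <stdatomic.h>
 *   #include <stdint.h>
 *
 *   #define EX_PHYS_REV_INCR  UINT64_C(0x4)
 *
 *   static void ExampleBumpOtherCpuPhysRev(_Atomic uint64_t *puPhysRev)
 *   {
 *       uint64_t uOld = atomic_load_explicit(puPhysRev, memory_order_relaxed);
 *       uint64_t uNew = uOld + EX_PHYS_REV_INCR;
 *       atomic_compare_exchange_strong(puPhysRev, &uOld, uNew);  // no-op if someone else got there first
 *   }
 * @endcode
 */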
773
774
775/**
776 * Flushes the prefetch buffer, light version.
777 */
778void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
779{
780#ifndef IEM_WITH_CODE_TLB
781 pVCpu->iem.s.cbOpcode = cbInstr;
782#else
783 RT_NOREF(pVCpu, cbInstr);
784#endif
785}
786
787
788/**
789 * Flushes the prefetch buffer, heavy version.
790 */
791void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
792{
793#ifndef IEM_WITH_CODE_TLB
794 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
795#elif 1
796 pVCpu->iem.s.pbInstrBuf = NULL;
797 RT_NOREF(cbInstr);
798#else
799 RT_NOREF(pVCpu, cbInstr);
800#endif
801}
802
803
804
805#ifdef IEM_WITH_CODE_TLB
806
807/**
 808 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
 809 * failure and jumps.
810 *
811 * We end up here for a number of reasons:
812 * - pbInstrBuf isn't yet initialized.
 813 * - Advancing beyond the buffer boundary (e.g. cross page).
814 * - Advancing beyond the CS segment limit.
815 * - Fetching from non-mappable page (e.g. MMIO).
816 *
817 * @param pVCpu The cross context virtual CPU structure of the
818 * calling thread.
819 * @param pvDst Where to return the bytes.
820 * @param cbDst Number of bytes to read. A value of zero is
821 * allowed for initializing pbInstrBuf (the
822 * recompiler does this). In this case it is best
823 * to set pbInstrBuf to NULL prior to the call.
824 */
825void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
826{
827# ifdef IN_RING3
828 for (;;)
829 {
830 Assert(cbDst <= 8);
831 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
832
833 /*
834 * We might have a partial buffer match, deal with that first to make the
835 * rest simpler. This is the first part of the cross page/buffer case.
836 */
837 if (pVCpu->iem.s.pbInstrBuf != NULL)
838 {
839 if (offBuf < pVCpu->iem.s.cbInstrBuf)
840 {
841 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
842 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
843 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
844
845 cbDst -= cbCopy;
846 pvDst = (uint8_t *)pvDst + cbCopy;
847 offBuf += cbCopy;
848 pVCpu->iem.s.offInstrNextByte += offBuf;
849 }
850 }
851
852 /*
853 * Check segment limit, figuring how much we're allowed to access at this point.
854 *
855 * We will fault immediately if RIP is past the segment limit / in non-canonical
856 * territory. If we do continue, there are one or more bytes to read before we
857 * end up in trouble and we need to do that first before faulting.
858 */
859 RTGCPTR GCPtrFirst;
860 uint32_t cbMaxRead;
861 if (IEM_IS_64BIT_CODE(pVCpu))
862 {
863 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
864 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
865 { /* likely */ }
866 else
867 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
868 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
869 }
870 else
871 {
872 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
873 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
874 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
875 { /* likely */ }
876 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
877 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
878 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
879 if (cbMaxRead != 0)
880 { /* likely */ }
881 else
882 {
883 /* Overflowed because address is 0 and limit is max. */
884 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
885 cbMaxRead = X86_PAGE_SIZE;
886 }
887 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
888 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
889 if (cbMaxRead2 < cbMaxRead)
890 cbMaxRead = cbMaxRead2;
891 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
892 }
893
894 /*
895 * Get the TLB entry for this piece of code.
896 */
897 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
898 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
899 if (pTlbe->uTag == uTag)
900 {
901 /* likely when executing lots of code, otherwise unlikely */
902# ifdef VBOX_WITH_STATISTICS
903 pVCpu->iem.s.CodeTlb.cTlbHits++;
904# endif
905 }
906 else
907 {
908 pVCpu->iem.s.CodeTlb.cTlbMisses++;
909 PGMPTWALK Walk;
910 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
911 if (RT_FAILURE(rc))
912 {
913#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
914 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
915 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
916#endif
917 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
918 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
919 }
920
921 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
922 Assert(Walk.fSucceeded);
923 pTlbe->uTag = uTag;
924 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
925 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
926 pTlbe->GCPhys = Walk.GCPhys;
927 pTlbe->pbMappingR3 = NULL;
928 }
929
930 /*
931 * Check TLB page table level access flags.
932 */
933 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
934 {
935 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
936 {
937 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
938 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
939 }
940 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
941 {
942 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
943 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
944 }
945 }
946
947 /*
948 * Look up the physical page info if necessary.
949 */
950 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
951 { /* not necessary */ }
952 else
953 {
954 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
955 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
956 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
957 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
958 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
959 { /* likely */ }
960 else
961 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
962 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
963 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
964 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
965 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
966 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
967 }
968
969# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
970 /*
971 * Try do a direct read using the pbMappingR3 pointer.
972 */
973 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
974 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
975 {
976 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
977 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
978 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
979 {
980 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
981 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
982 }
983 else
984 {
985 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
986 if (cbInstr + (uint32_t)cbDst <= 15)
987 {
988 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
989 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
990 }
991 else
992 {
993 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
994 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
995 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
996 }
997 }
998 if (cbDst <= cbMaxRead)
999 {
1000 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1001 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1002 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1003 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1004 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1005 return;
1006 }
1007 pVCpu->iem.s.pbInstrBuf = NULL;
1008
1009 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1010 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1011 }
1012# else
1013# error "refactor as needed"
1014 /*
1015 * There is no special read handling, so we can read a bit more and
1016 * put it in the prefetch buffer.
1017 */
1018 if ( cbDst < cbMaxRead
1019 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1020 {
1021 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1022 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1023 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1024 { /* likely */ }
1025 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1026 {
1027 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1028 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1029 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1030 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1031 }
1032 else
1033 {
1034 Log((RT_SUCCESS(rcStrict)
1035 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1036 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1037 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1038 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1039 }
1040 }
1041# endif
1042 /*
1043 * Special read handling, so only read exactly what's needed.
1044 * This is a highly unlikely scenario.
1045 */
1046 else
1047 {
1048 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1049
1050 /* Check instruction length. */
1051 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1052 if (RT_LIKELY(cbInstr + cbDst <= 15))
1053 { /* likely */ }
1054 else
1055 {
1056 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1057 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1058 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1059 }
1060
1061 /* Do the reading. */
1062 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1063 if (cbToRead > 0)
1064 {
1065 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1066 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1067 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1068 { /* likely */ }
1069 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1070 {
1071 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1072 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1073 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1074 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1075 }
1076 else
1077 {
1078 Log((RT_SUCCESS(rcStrict)
1079 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1080 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1081 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1082 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1083 }
1084 }
1085
1086 /* Update the state and probably return. */
1087 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1088 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1089 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1090 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1091 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE;
1092 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1093 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1094 pVCpu->iem.s.pbInstrBuf = NULL;
1095 if (cbToRead == cbDst)
1096 return;
1097 }
1098
1099 /*
1100 * More to read, loop.
1101 */
1102 cbDst -= cbMaxRead;
1103 pvDst = (uint8_t *)pvDst + cbMaxRead;
1104 }
1105# else /* !IN_RING3 */
1106 RT_NOREF(pvDst, cbDst);
1107 if (pvDst || cbDst)
1108 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1109# endif /* !IN_RING3 */
1110}
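
/*
 * Illustrative example (not part of the build): the 15 byte limit enforced in
 * iemOpcodeFetchBytesJmp() above.  An x86 instruction, including all prefixes,
 * may not exceed 15 bytes; once the bytes decoded so far plus the requested
 * bytes would pass that, the fetcher raises \#GP(0) instead of reading on.
 * @code
 *   #include <stdbool.h>
 *   #include <stdint.h>
 *
 *   static bool ExampleExceedsMaxInstrLen(uint32_t cbDecodedSoFar, uint32_t cbMore)
 *   {
 *       return cbDecodedSoFar + cbMore > 15;  // true -> raise #GP(0)
 *   }
 * @endcode
 */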
1111
1112#else /* !IEM_WITH_CODE_TLB */
1113
1114/**
1115 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1116 * exception if it fails.
1117 *
1118 * @returns Strict VBox status code.
1119 * @param pVCpu The cross context virtual CPU structure of the
1120 * calling thread.
1121 * @param cbMin The minimum number of bytes relative to offOpcode
1122 * that must be read.
1123 */
1124VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1125{
1126 /*
1127 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1128 *
1129 * First translate CS:rIP to a physical address.
1130 */
1131 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1132 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1133 uint8_t const cbLeft = cbOpcode - offOpcode;
1134 Assert(cbLeft < cbMin);
1135 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1136
1137 uint32_t cbToTryRead;
1138 RTGCPTR GCPtrNext;
1139 if (IEM_IS_64BIT_CODE(pVCpu))
1140 {
1141 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1142 if (!IEM_IS_CANONICAL(GCPtrNext))
1143 return iemRaiseGeneralProtectionFault0(pVCpu);
1144 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1145 }
1146 else
1147 {
1148 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1149 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1150 GCPtrNext32 += cbOpcode;
1151 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1152 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1153 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1154 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1155 if (!cbToTryRead) /* overflowed */
1156 {
1157 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1158 cbToTryRead = UINT32_MAX;
1159 /** @todo check out wrapping around the code segment. */
1160 }
1161 if (cbToTryRead < cbMin - cbLeft)
1162 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1163 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1164
1165 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1166 if (cbToTryRead > cbLeftOnPage)
1167 cbToTryRead = cbLeftOnPage;
1168 }
1169
1170 /* Restrict to opcode buffer space.
1171
1172 We're making ASSUMPTIONS here based on work done previously in
1173 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1174 be fetched in case of an instruction crossing two pages. */
1175 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1176 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1177 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1178 { /* likely */ }
1179 else
1180 {
1181 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1182 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1183 return iemRaiseGeneralProtectionFault0(pVCpu);
1184 }
1185
1186 PGMPTWALK Walk;
1187 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1188 if (RT_FAILURE(rc))
1189 {
1190 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1191#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1192 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1193 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1194#endif
1195 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1196 }
1197 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1198 {
1199 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1200#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1201 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1202 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1203#endif
1204 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1205 }
1206 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1207 {
1208 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1209#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1210 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1211 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1212#endif
1213 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1214 }
1215 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1216 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1217 /** @todo Check reserved bits and such stuff. PGM is better at doing
1218 * that, so do it when implementing the guest virtual address
1219 * TLB... */
1220
1221 /*
1222 * Read the bytes at this address.
1223 *
1224 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1225 * and since PATM should only patch the start of an instruction there
1226 * should be no need to check again here.
1227 */
1228 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1229 {
1230 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1231 cbToTryRead, PGMACCESSORIGIN_IEM);
1232 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1233 { /* likely */ }
1234 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1235 {
1236 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1237 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1238 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1239 }
1240 else
1241 {
1242 Log((RT_SUCCESS(rcStrict)
1243 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1244 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1245 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1246 return rcStrict;
1247 }
1248 }
1249 else
1250 {
1251 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1252 if (RT_SUCCESS(rc))
1253 { /* likely */ }
1254 else
1255 {
1256 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1257 return rc;
1258 }
1259 }
1260 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1261 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1262
1263 return VINF_SUCCESS;
1264}
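
/*
 * Illustrative example (not part of the build): the "overflowed" case handled
 * in iemOpcodeFetchMoreBytes() above.  With a flat segment (limit 0xffffffff)
 * and an instruction pointer of 0, the 32-bit expression limit - eip + 1 wraps
 * to 0, which is why a zero result is treated as "the entire 4 GiB range is
 * addressable".
 * @code
 *   #include <stdint.h>
 *
 *   static uint32_t ExampleBytesBelowLimit(uint32_t uEip, uint32_t uLimit)
 *   {
 *       uint32_t cbToTryRead = uLimit - uEip + 1;  // wraps to 0 for uEip == 0, uLimit == UINT32_MAX
 *       return cbToTryRead != 0 ? cbToTryRead : UINT32_MAX;
 *   }
 * @endcode
 */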
1265
1266#endif /* !IEM_WITH_CODE_TLB */
1267#ifndef IEM_WITH_SETJMP
1268
1269/**
1270 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1271 *
1272 * @returns Strict VBox status code.
1273 * @param pVCpu The cross context virtual CPU structure of the
1274 * calling thread.
1275 * @param pb Where to return the opcode byte.
1276 */
1277VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1278{
1279 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1280 if (rcStrict == VINF_SUCCESS)
1281 {
1282 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1283 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1284 pVCpu->iem.s.offOpcode = offOpcode + 1;
1285 }
1286 else
1287 *pb = 0;
1288 return rcStrict;
1289}
1290
1291#else /* IEM_WITH_SETJMP */
1292
1293/**
1294 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1295 *
1296 * @returns The opcode byte.
1297 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1298 */
1299uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1300{
1301# ifdef IEM_WITH_CODE_TLB
1302 uint8_t u8;
1303 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1304 return u8;
1305# else
1306 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1307 if (rcStrict == VINF_SUCCESS)
1308 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1309 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1310# endif
1311}
1312
1313#endif /* IEM_WITH_SETJMP */
1314
1315#ifndef IEM_WITH_SETJMP
1316
1317/**
1318 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1319 *
1320 * @returns Strict VBox status code.
1321 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1322 * @param pu16 Where to return the opcode word.
1323 */
1324VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1325{
1326 uint8_t u8;
1327 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1328 if (rcStrict == VINF_SUCCESS)
1329 *pu16 = (int8_t)u8;
1330 return rcStrict;
1331}
1332
1333
1334/**
1335 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1336 *
1337 * @returns Strict VBox status code.
1338 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1339 * @param pu32 Where to return the opcode dword.
1340 */
1341VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1342{
1343 uint8_t u8;
1344 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1345 if (rcStrict == VINF_SUCCESS)
1346 *pu32 = (int8_t)u8;
1347 return rcStrict;
1348}
1349
1350
1351/**
1352 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1353 *
1354 * @returns Strict VBox status code.
1355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1356 * @param pu64 Where to return the opcode qword.
1357 */
1358VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1359{
1360 uint8_t u8;
1361 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1362 if (rcStrict == VINF_SUCCESS)
1363 *pu64 = (int8_t)u8;
1364 return rcStrict;
1365}
1366
1367#endif /* !IEM_WITH_SETJMP */
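
/*
 * Illustrative example (not part of the build): the sign extension performed
 * by the S8SxU16/S8SxU32/S8SxU64 helpers above.  Casting the opcode byte to
 * int8_t first makes the compiler replicate the sign bit when widening to the
 * destination size.
 * @code
 *   #include <stdint.h>
 *
 *   static uint32_t ExampleSignExtendByteToU32(uint8_t u8)
 *   {
 *       return (uint32_t)(int32_t)(int8_t)u8;  // 0x80 -> 0xffffff80, 0x7f -> 0x0000007f
 *   }
 * @endcode
 */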
1368
1369
1370#ifndef IEM_WITH_SETJMP
1371
1372/**
1373 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1374 *
1375 * @returns Strict VBox status code.
1376 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1377 * @param pu16 Where to return the opcode word.
1378 */
1379VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1380{
1381 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1382 if (rcStrict == VINF_SUCCESS)
1383 {
1384 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1385# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1386 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1387# else
1388 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1389# endif
1390 pVCpu->iem.s.offOpcode = offOpcode + 2;
1391 }
1392 else
1393 *pu16 = 0;
1394 return rcStrict;
1395}
1396
1397#else /* IEM_WITH_SETJMP */
1398
1399/**
1400 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
1401 *
1402 * @returns The opcode word.
1403 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1404 */
1405uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1406{
1407# ifdef IEM_WITH_CODE_TLB
1408 uint16_t u16;
1409 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1410 return u16;
1411# else
1412 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1413 if (rcStrict == VINF_SUCCESS)
1414 {
1415 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1416 pVCpu->iem.s.offOpcode += 2;
1417# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1418 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1419# else
1420 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1421# endif
1422 }
1423 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1424# endif
1425}
1426
1427#endif /* IEM_WITH_SETJMP */
1428
1429#ifndef IEM_WITH_SETJMP
1430
1431/**
1432 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1433 *
1434 * @returns Strict VBox status code.
1435 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1436 * @param pu32 Where to return the opcode double word.
1437 */
1438VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1439{
1440 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1441 if (rcStrict == VINF_SUCCESS)
1442 {
1443 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1444 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1445 pVCpu->iem.s.offOpcode = offOpcode + 2;
1446 }
1447 else
1448 *pu32 = 0;
1449 return rcStrict;
1450}
1451
1452
1453/**
1454 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1455 *
1456 * @returns Strict VBox status code.
1457 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1458 * @param pu64 Where to return the opcode quad word.
1459 */
1460VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1461{
1462 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1463 if (rcStrict == VINF_SUCCESS)
1464 {
1465 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1466 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1467 pVCpu->iem.s.offOpcode = offOpcode + 2;
1468 }
1469 else
1470 *pu64 = 0;
1471 return rcStrict;
1472}
1473
1474#endif /* !IEM_WITH_SETJMP */
1475
1476#ifndef IEM_WITH_SETJMP
1477
1478/**
1479 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1480 *
1481 * @returns Strict VBox status code.
1482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1483 * @param pu32 Where to return the opcode dword.
1484 */
1485VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1486{
1487 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1488 if (rcStrict == VINF_SUCCESS)
1489 {
1490 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1491# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1492 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1493# else
1494 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1495 pVCpu->iem.s.abOpcode[offOpcode + 1],
1496 pVCpu->iem.s.abOpcode[offOpcode + 2],
1497 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1498# endif
1499 pVCpu->iem.s.offOpcode = offOpcode + 4;
1500 }
1501 else
1502 *pu32 = 0;
1503 return rcStrict;
1504}
1505
1506#else /* IEM_WITH_SETJMP */
1507
1508/**
1509 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1510 *
1511 * @returns The opcode dword.
1512 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1513 */
1514uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1515{
1516# ifdef IEM_WITH_CODE_TLB
1517 uint32_t u32;
1518 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1519 return u32;
1520# else
1521 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1522 if (rcStrict == VINF_SUCCESS)
1523 {
1524 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1525 pVCpu->iem.s.offOpcode = offOpcode + 4;
1526# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1527 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1528# else
1529 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1530 pVCpu->iem.s.abOpcode[offOpcode + 1],
1531 pVCpu->iem.s.abOpcode[offOpcode + 2],
1532 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1533# endif
1534 }
1535 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1536# endif
1537}
1538
1539#endif /* IEM_WITH_SETJMP */
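
/*
 * Illustrative example (not part of the build): what the RT_MAKE_U32_FROM_U8
 * path above computes when unaligned loads are not used.  The opcode bytes are
 * assembled little-endian, i.e. the byte at the lowest address becomes the
 * least significant byte of the result.
 * @code
 *   #include <stdint.h>
 *
 *   static uint32_t ExampleMakeU32FromBytes(const uint8_t *pb)
 *   {
 *       return (uint32_t)pb[0]
 *            | ((uint32_t)pb[1] << 8)
 *            | ((uint32_t)pb[2] << 16)
 *            | ((uint32_t)pb[3] << 24);
 *   }
 *   // { 0x78, 0x56, 0x34, 0x12 } -> 0x12345678
 * @endcode
 */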
1540
1541#ifndef IEM_WITH_SETJMP
1542
1543/**
1544 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1545 *
1546 * @returns Strict VBox status code.
1547 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1548 * @param pu64 Where to return the opcode quad word.
1549 */
1550VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1551{
1552 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1553 if (rcStrict == VINF_SUCCESS)
1554 {
1555 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1556 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1557 pVCpu->iem.s.abOpcode[offOpcode + 1],
1558 pVCpu->iem.s.abOpcode[offOpcode + 2],
1559 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1560 pVCpu->iem.s.offOpcode = offOpcode + 4;
1561 }
1562 else
1563 *pu64 = 0;
1564 return rcStrict;
1565}
1566
1567
1568/**
1569 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1570 *
1571 * @returns Strict VBox status code.
1572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1573 * @param pu64 Where to return the opcode dword, sign extended to a qword.
1574 */
1575VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1576{
1577 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1578 if (rcStrict == VINF_SUCCESS)
1579 {
1580 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1581 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1582 pVCpu->iem.s.abOpcode[offOpcode + 1],
1583 pVCpu->iem.s.abOpcode[offOpcode + 2],
1584 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1585 pVCpu->iem.s.offOpcode = offOpcode + 4;
1586 }
1587 else
1588 *pu64 = 0;
1589 return rcStrict;
1590}
1591
1592#endif /* !IEM_WITH_SETJMP */
1593
1594#ifndef IEM_WITH_SETJMP
1595
1596/**
1597 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1598 *
1599 * @returns Strict VBox status code.
1600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1601 * @param pu64 Where to return the opcode qword.
1602 */
1603VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1604{
1605 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1606 if (rcStrict == VINF_SUCCESS)
1607 {
1608 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1609# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1610 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1611# else
1612 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1613 pVCpu->iem.s.abOpcode[offOpcode + 1],
1614 pVCpu->iem.s.abOpcode[offOpcode + 2],
1615 pVCpu->iem.s.abOpcode[offOpcode + 3],
1616 pVCpu->iem.s.abOpcode[offOpcode + 4],
1617 pVCpu->iem.s.abOpcode[offOpcode + 5],
1618 pVCpu->iem.s.abOpcode[offOpcode + 6],
1619 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1620# endif
1621 pVCpu->iem.s.offOpcode = offOpcode + 8;
1622 }
1623 else
1624 *pu64 = 0;
1625 return rcStrict;
1626}
1627
1628#else /* IEM_WITH_SETJMP */
1629
1630/**
1631 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1632 *
1633 * @returns The opcode qword.
1634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1635 */
1636uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1637{
1638# ifdef IEM_WITH_CODE_TLB
1639 uint64_t u64;
1640 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1641 return u64;
1642# else
1643 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1644 if (rcStrict == VINF_SUCCESS)
1645 {
1646 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1647 pVCpu->iem.s.offOpcode = offOpcode + 8;
1648# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1649 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1650# else
1651 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1652 pVCpu->iem.s.abOpcode[offOpcode + 1],
1653 pVCpu->iem.s.abOpcode[offOpcode + 2],
1654 pVCpu->iem.s.abOpcode[offOpcode + 3],
1655 pVCpu->iem.s.abOpcode[offOpcode + 4],
1656 pVCpu->iem.s.abOpcode[offOpcode + 5],
1657 pVCpu->iem.s.abOpcode[offOpcode + 6],
1658 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1659# endif
1660 }
1661 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1662# endif
1663}
1664
1665#endif /* IEM_WITH_SETJMP */
1666
1667
1668
1669/** @name Misc Worker Functions.
1670 * @{
1671 */
1672
1673/**
1674 * Gets the exception class for the specified exception vector.
1675 *
1676 * @returns The class of the specified exception.
1677 * @param uVector The exception vector.
1678 */
1679static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1680{
1681 Assert(uVector <= X86_XCPT_LAST);
1682 switch (uVector)
1683 {
1684 case X86_XCPT_DE:
1685 case X86_XCPT_TS:
1686 case X86_XCPT_NP:
1687 case X86_XCPT_SS:
1688 case X86_XCPT_GP:
1689 case X86_XCPT_SX: /* AMD only */
1690 return IEMXCPTCLASS_CONTRIBUTORY;
1691
1692 case X86_XCPT_PF:
1693 case X86_XCPT_VE: /* Intel only */
1694 return IEMXCPTCLASS_PAGE_FAULT;
1695
1696 case X86_XCPT_DF:
1697 return IEMXCPTCLASS_DOUBLE_FAULT;
1698 }
1699 return IEMXCPTCLASS_BENIGN;
1700}
1701
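
/*
 * Hedged sketch of how the classes above combine when one exception hits while
 * another is being delivered (cf. the Intel SDM double fault conditions and
 * IEMEvaluateRecursiveXcpt below): contributory on top of contributory, or a
 * page fault / contributory exception on top of a page fault, escalates to
 * \#DF.  The helper name is hypothetical and the block is not built.
 */
#if 0 /* illustrative sketch, not built */
static bool iemSketchIsDoubleFaultCombo(IEMXCPTCLASS enmPrevClass, IEMXCPTCLASS enmCurClass)
{
    if (enmPrevClass == IEMXCPTCLASS_CONTRIBUTORY)
        return enmCurClass == IEMXCPTCLASS_CONTRIBUTORY;
    if (enmPrevClass == IEMXCPTCLASS_PAGE_FAULT)
        return enmCurClass == IEMXCPTCLASS_CONTRIBUTORY || enmCurClass == IEMXCPTCLASS_PAGE_FAULT;
    /* Benign previous events don't escalate; a prior #DF goes to triple fault instead (see below). */
    return false;
}
#endif
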
1702
1703/**
1704 * Evaluates how to handle an exception caused during delivery of another event
1705 * (exception / interrupt).
1706 *
1707 * @returns How to handle the recursive exception.
1708 * @param pVCpu The cross context virtual CPU structure of the
1709 * calling thread.
1710 * @param fPrevFlags The flags of the previous event.
1711 * @param uPrevVector The vector of the previous event.
1712 * @param fCurFlags The flags of the current exception.
1713 * @param uCurVector The vector of the current exception.
1714 * @param pfXcptRaiseInfo Where to store additional information about the
1715 * exception condition. Optional.
1716 */
1717VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1718 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1719{
1720 /*
1721 * Only CPU exceptions can be raised while delivering other events; software interrupt
1722 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1723 */
1724 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1725 Assert(pVCpu); RT_NOREF(pVCpu);
1726 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1727
1728 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1729 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1730 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1731 {
1732 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1733 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1734 {
1735 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1736 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1737 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1738 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1739 {
1740 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1741 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1742 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1743 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1744 uCurVector, pVCpu->cpum.GstCtx.cr2));
1745 }
1746 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1747 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1748 {
1749 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1750 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1751 }
1752 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1753 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1754 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1755 {
1756 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1757 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1758 }
1759 }
1760 else
1761 {
1762 if (uPrevVector == X86_XCPT_NMI)
1763 {
1764 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1765 if (uCurVector == X86_XCPT_PF)
1766 {
1767 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1768 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1769 }
1770 }
1771 else if ( uPrevVector == X86_XCPT_AC
1772 && uCurVector == X86_XCPT_AC)
1773 {
1774 enmRaise = IEMXCPTRAISE_CPU_HANG;
1775 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1776 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1777 }
1778 }
1779 }
1780 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1781 {
1782 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1783 if (uCurVector == X86_XCPT_PF)
1784 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1785 }
1786 else
1787 {
1788 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1789 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1790 }
1791
1792 if (pfXcptRaiseInfo)
1793 *pfXcptRaiseInfo = fRaiseInfo;
1794 return enmRaise;
1795}
1796
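
/*
 * Hedged usage sketch for IEMEvaluateRecursiveXcpt: a #GP raised while a #NP
 * is being delivered pairs two contributory exceptions, so the call should
 * yield IEMXCPTRAISE_DOUBLE_FAULT.  pVCpu is assumed to be a valid cross
 * context virtual CPU structure; illustration only, not built.
 */
#if 0 /* illustrative sketch, not built */
{
    IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
    IEMXCPTRAISE const enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_NP,
                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
                                                           &fRaiseInfo);
    Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
}
#endif
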
1797
1798/**
1799 * Enters the CPU shutdown state initiated by a triple fault or other
1800 * unrecoverable conditions.
1801 *
1802 * @returns Strict VBox status code.
1803 * @param pVCpu The cross context virtual CPU structure of the
1804 * calling thread.
1805 */
1806static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1807{
1808 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1809 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1810
1811 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1812 {
1813 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1814 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1815 }
1816
1817 RT_NOREF(pVCpu);
1818 return VINF_EM_TRIPLE_FAULT;
1819}
1820
1821
1822/**
1823 * Validates a new SS segment.
1824 *
1825 * @returns VBox strict status code.
1826 * @param pVCpu The cross context virtual CPU structure of the
1827 * calling thread.
1828 * @param NewSS The new SS selector.
1829 * @param uCpl The CPL to load the stack for.
1830 * @param pDesc Where to return the descriptor.
1831 */
1832static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1833{
1834 /* Null selectors are not allowed (we're not called for dispatching
1835 interrupts with SS=0 in long mode). */
1836 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1837 {
1838 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1839 return iemRaiseTaskSwitchFault0(pVCpu);
1840 }
1841
1842 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1843 if ((NewSS & X86_SEL_RPL) != uCpl)
1844 {
1845 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1846 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1847 }
1848
1849 /*
1850 * Read the descriptor.
1851 */
1852 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1853 if (rcStrict != VINF_SUCCESS)
1854 return rcStrict;
1855
1856 /*
1857 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1858 */
1859 if (!pDesc->Legacy.Gen.u1DescType)
1860 {
1861 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1862 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1863 }
1864
1865 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1866 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1867 {
1868 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1869 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1870 }
1871 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1872 {
1873 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1874 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1875 }
1876
1877 /* Is it there? */
1878 /** @todo testcase: Is this checked before the canonical / limit check below? */
1879 if (!pDesc->Legacy.Gen.u1Present)
1880 {
1881 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1882 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1883 }
1884
1885 return VINF_SUCCESS;
1886}
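

/*
 * Hedged sketch of the attribute test performed by iemMiscValidateNewSS above,
 * assuming the generic descriptor bit layout: SS must be a present, writable
 * data segment whose DPL equals the target CPL.  The helper name is
 * hypothetical and the block is not built.
 */
#if 0 /* illustrative sketch, not built */
static bool iemSketchIsAcceptableSsDesc(PCX86DESC pDesc, uint8_t uCpl)
{
    return pDesc->Gen.u1DescType                        /* code/data, not a system descriptor */
        && !(pDesc->Gen.u4Type & X86_SEL_TYPE_CODE)     /* must be a data segment... */
        && (pDesc->Gen.u4Type & X86_SEL_TYPE_WRITE)     /* ...and writable */
        && pDesc->Gen.u2Dpl == uCpl                     /* DPL == CPL */
        && pDesc->Gen.u1Present;                        /* present, else #NP rather than #TS */
}
#endif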
1887
1888/** @} */
1889
1890
1891/** @name Raising Exceptions.
1892 *
1893 * @{
1894 */
1895
1896
1897/**
1898 * Loads the specified stack far pointer from the TSS.
1899 *
1900 * @returns VBox strict status code.
1901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1902 * @param uCpl The CPL to load the stack for.
1903 * @param pSelSS Where to return the new stack segment.
1904 * @param puEsp Where to return the new stack pointer.
1905 */
1906static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1907{
1908 VBOXSTRICTRC rcStrict;
1909 Assert(uCpl < 4);
1910
1911 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1912 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1913 {
1914 /*
1915 * 16-bit TSS (X86TSS16).
1916 */
1917 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1918 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1919 {
1920 uint32_t off = uCpl * 4 + 2;
1921 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1922 {
1923 /** @todo check actual access pattern here. */
1924 uint32_t u32Tmp = 0; /* gcc maybe... */
1925 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1926 if (rcStrict == VINF_SUCCESS)
1927 {
1928 *puEsp = RT_LOWORD(u32Tmp);
1929 *pSelSS = RT_HIWORD(u32Tmp);
1930 return VINF_SUCCESS;
1931 }
1932 }
1933 else
1934 {
1935 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1936 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1937 }
1938 break;
1939 }
1940
1941 /*
1942 * 32-bit TSS (X86TSS32).
1943 */
1944 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1945 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1946 {
1947 uint32_t off = uCpl * 8 + 4;
1948 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1949 {
1950/** @todo check actual access pattern here. */
1951 uint64_t u64Tmp;
1952 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1953 if (rcStrict == VINF_SUCCESS)
1954 {
1955 *puEsp = u64Tmp & UINT32_MAX;
1956 *pSelSS = (RTSEL)(u64Tmp >> 32);
1957 return VINF_SUCCESS;
1958 }
1959 }
1960 else
1961 {
1962 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1963 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1964 }
1965 break;
1966 }
1967
1968 default:
1969 AssertFailed();
1970 rcStrict = VERR_IEM_IPE_4;
1971 break;
1972 }
1973
1974 *puEsp = 0; /* make gcc happy */
1975 *pSelSS = 0; /* make gcc happy */
1976 return rcStrict;
1977}
1978
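
/*
 * Hedged sketch of the offset arithmetic used above, assuming the standard
 * X86TSS16/X86TSS32 layouts: the {SPn,SSn} word pairs start at offset 2 in the
 * 16-bit TSS, the {ESPn,SSn} dword pairs at offset 4 in the 32-bit one.  The
 * helper name is hypothetical and the block is not built.
 */
#if 0 /* illustrative sketch, not built */
static uint32_t iemSketchTssStackFieldOffset(bool fTss386, uint8_t uCpl)
{
    return fTss386
         ? uCpl * 8u + 4u   /* esp0 at 4, esp1 at 12, esp2 at 20; ss follows each. */
         : uCpl * 4u + 2u;  /* sp0 at 2, sp1 at 6, sp2 at 10; ss follows each. */
}
#endif
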
1979
1980/**
1981 * Loads the specified stack pointer from the 64-bit TSS.
1982 *
1983 * @returns VBox strict status code.
1984 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1985 * @param uCpl The CPL to load the stack for.
1986 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1987 * @param puRsp Where to return the new stack pointer.
1988 */
1989static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
1990{
1991 Assert(uCpl < 4);
1992 Assert(uIst < 8);
1993 *puRsp = 0; /* make gcc happy */
1994
1995 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1996 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
1997
1998 uint32_t off;
1999 if (uIst)
2000 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2001 else
2002 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2003 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2004 {
2005 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2006 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2007 }
2008
2009 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2010}
2011
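
/*
 * Hedged sketch of the 64-bit TSS offsets resolved above, assuming the
 * standard X86TSS64 layout (rsp0 at offset 4, ist1 at offset 36, all stack
 * fields 8 bytes wide).  The helper name is hypothetical and the block is
 * not built.
 */
#if 0 /* illustrative sketch, not built */
static uint32_t iemSketchTss64StackFieldOffset(uint8_t uCpl, uint8_t uIst)
{
    if (uIst)                           /* IST1..IST7 at offsets 36, 44, ..., 84. */
        return (uIst - 1) * 8u + 36u;
    return uCpl * 8u + 4u;              /* RSP0/RSP1/RSP2 at offsets 4, 12, 20. */
}
#endif
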
2012
2013/**
2014 * Adjust the CPU state according to the exception being raised.
2015 *
2016 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2017 * @param u8Vector The exception that has been raised.
2018 */
2019DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2020{
2021 switch (u8Vector)
2022 {
2023 case X86_XCPT_DB:
2024 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2025 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2026 break;
2027 /** @todo Read the AMD and Intel exception reference... */
2028 }
2029}
2030
2031
2032/**
2033 * Implements exceptions and interrupts for real mode.
2034 *
2035 * @returns VBox strict status code.
2036 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2037 * @param cbInstr The number of bytes to offset rIP by in the return
2038 * address.
2039 * @param u8Vector The interrupt / exception vector number.
2040 * @param fFlags The flags.
2041 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2042 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2043 */
2044static VBOXSTRICTRC
2045iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2046 uint8_t cbInstr,
2047 uint8_t u8Vector,
2048 uint32_t fFlags,
2049 uint16_t uErr,
2050 uint64_t uCr2) RT_NOEXCEPT
2051{
2052 NOREF(uErr); NOREF(uCr2);
2053 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2054
2055 /*
2056 * Read the IDT entry.
2057 */
2058 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2059 {
2060 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2061 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2062 }
2063 RTFAR16 Idte;
2064 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2065 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2066 {
2067 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2068 return rcStrict;
2069 }
2070
2071 /*
2072 * Push the stack frame.
2073 */
2074 uint16_t *pu16Frame;
2075 uint64_t uNewRsp;
2076 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
2077 if (rcStrict != VINF_SUCCESS)
2078 return rcStrict;
2079
2080 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2081#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2082 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2083 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2084 fEfl |= UINT16_C(0xf000);
2085#endif
2086 pu16Frame[2] = (uint16_t)fEfl;
2087 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2088 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2089 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
2090 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2091 return rcStrict;
2092
2093 /*
2094 * Load the vector address into cs:ip and make exception specific state
2095 * adjustments.
2096 */
2097 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2098 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2099 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2100 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2101 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2102 pVCpu->cpum.GstCtx.rip = Idte.off;
2103 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2104 IEMMISC_SET_EFL(pVCpu, fEfl);
2105
2106 /** @todo do we actually do this in real mode? */
2107 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2108 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2109
2110 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2111 so it's best to leave them alone in case we're in a weird kind of real mode... */
2112
2113 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2114}
2115
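
/*
 * Hedged recap of the real-mode dispatch above: the IVT holds one 4-byte
 * IP:CS pair per vector at linear address vector * 4, and a 6-byte
 * FLAGS/CS/IP frame is pushed before CS:IP is loaded from that entry.  The
 * helper name is hypothetical and the block is not built.
 */
#if 0 /* illustrative sketch, not built */
static uint32_t iemSketchRealModeIdtEntryAddr(uint32_t uIdtBase, uint8_t bVector)
{
    /* Layout of each entry: uint16_t offset (IP) followed by uint16_t segment (CS). */
    return uIdtBase + (uint32_t)bVector * 4;
}
#endif
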
2116
2117/**
2118 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2119 *
2120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2121 * @param pSReg Pointer to the segment register.
2122 */
2123DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2124{
2125 pSReg->Sel = 0;
2126 pSReg->ValidSel = 0;
2127 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2128 {
2129 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
2130 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2131 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2132 }
2133 else
2134 {
2135 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2136 /** @todo check this on AMD-V */
2137 pSReg->u64Base = 0;
2138 pSReg->u32Limit = 0;
2139 }
2140}
2141
2142
2143/**
2144 * Loads a segment selector during a task switch in V8086 mode.
2145 *
2146 * @param pSReg Pointer to the segment register.
2147 * @param uSel The selector value to load.
2148 */
2149DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2150{
2151 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2152 pSReg->Sel = uSel;
2153 pSReg->ValidSel = uSel;
2154 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2155 pSReg->u64Base = uSel << 4;
2156 pSReg->u32Limit = 0xffff;
2157 pSReg->Attr.u = 0xf3;
2158}
2159
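
/*
 * Hedged worked example for the helper above: loading selector 0x1234 in
 * V8086 mode yields base 0x12340 (selector << 4), limit 0xffff and the fixed
 * attribute byte 0xf3 (present, DPL=3, accessed read/write data).  The
 * function name is hypothetical and the block is not built.
 */
#if 0 /* illustrative sketch, not built */
static void iemSketchV86SelectorExample(void)
{
    CPUMSELREG SReg;
    iemHlpLoadSelectorInV86Mode(&SReg, 0x1234);
    Assert(SReg.u64Base  == UINT64_C(0x12340));
    Assert(SReg.u32Limit == UINT32_C(0xffff));
    Assert(SReg.Attr.u   == 0xf3);
}
#endif
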
2160
2161/**
2162 * Loads a segment selector during a task switch in protected mode.
2163 *
2164 * In this task switch scenario, we would throw \#TS exceptions rather than
2165 * \#GPs.
2166 *
2167 * @returns VBox strict status code.
2168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2169 * @param pSReg Pointer to the segment register.
2170 * @param uSel The new selector value.
2171 *
2172 * @remarks This does _not_ handle CS or SS.
2173 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2174 */
2175static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2176{
2177 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2178
2179 /* Null data selector. */
2180 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2181 {
2182 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2183 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2184 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2185 return VINF_SUCCESS;
2186 }
2187
2188 /* Fetch the descriptor. */
2189 IEMSELDESC Desc;
2190 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2191 if (rcStrict != VINF_SUCCESS)
2192 {
2193 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2194 VBOXSTRICTRC_VAL(rcStrict)));
2195 return rcStrict;
2196 }
2197
2198 /* Must be a data segment or readable code segment. */
2199 if ( !Desc.Legacy.Gen.u1DescType
2200 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2201 {
2202 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2203 Desc.Legacy.Gen.u4Type));
2204 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2205 }
2206
2207 /* Check privileges for data segments and non-conforming code segments. */
2208 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2209 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2210 {
2211 /* The RPL and the new CPL must be less than or equal to the DPL. */
2212 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2213 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2214 {
2215 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2216 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2217 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2218 }
2219 }
2220
2221 /* Is it there? */
2222 if (!Desc.Legacy.Gen.u1Present)
2223 {
2224 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2225 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2226 }
2227
2228 /* The base and limit. */
2229 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2230 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2231
2232 /*
2233 * Ok, everything checked out fine. Now set the accessed bit before
2234 * committing the result into the registers.
2235 */
2236 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2237 {
2238 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2239 if (rcStrict != VINF_SUCCESS)
2240 return rcStrict;
2241 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2242 }
2243
2244 /* Commit */
2245 pSReg->Sel = uSel;
2246 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2247 pSReg->u32Limit = cbLimit;
2248 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2249 pSReg->ValidSel = uSel;
2250 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2251 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2252 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2253
2254 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2255 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2256 return VINF_SUCCESS;
2257}
2258
2259
2260/**
2261 * Performs a task switch.
2262 *
2263 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2264 * caller is responsible for performing the necessary checks (like DPL, TSS
2265 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2266 * reference for JMP, CALL, IRET.
2267 *
2268 * If the task switch is due to a software interrupt or hardware exception,
2269 * the caller is responsible for validating the TSS selector and descriptor. See
2270 * Intel Instruction reference for INT n.
2271 *
2272 * @returns VBox strict status code.
2273 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2274 * @param enmTaskSwitch The cause of the task switch.
2275 * @param uNextEip The EIP effective after the task switch.
2276 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2277 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2278 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2279 * @param SelTSS The TSS selector of the new task.
2280 * @param pNewDescTSS Pointer to the new TSS descriptor.
2281 */
2282VBOXSTRICTRC
2283iemTaskSwitch(PVMCPUCC pVCpu,
2284 IEMTASKSWITCH enmTaskSwitch,
2285 uint32_t uNextEip,
2286 uint32_t fFlags,
2287 uint16_t uErr,
2288 uint64_t uCr2,
2289 RTSEL SelTSS,
2290 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2291{
2292 Assert(!IEM_IS_REAL_MODE(pVCpu));
2293 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2294 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2295
2296 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2297 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2298 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2299 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2300 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2301
2302 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2303 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2304
2305 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2306 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2307
2308 /* Update CR2 in case it's a page-fault. */
2309 /** @todo This should probably be done much earlier in IEM/PGM. See
2310 * @bugref{5653#c49}. */
2311 if (fFlags & IEM_XCPT_FLAGS_CR2)
2312 pVCpu->cpum.GstCtx.cr2 = uCr2;
2313
2314 /*
2315 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2316 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2317 */
2318 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2319 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2320 if (uNewTSSLimit < uNewTSSLimitMin)
2321 {
2322 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2323 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2324 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2325 }
2326
2327 /*
2328 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2329 * The new TSS must have been read and validated (DPL, limits etc.) before a
2330 * task-switch VM-exit commences.
2331 *
2332 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2333 */
2334 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2335 {
2336 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2337 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2338 }
2339
2340 /*
2341 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2342 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2343 */
2344 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2345 {
2346 uint32_t const uExitInfo1 = SelTSS;
2347 uint32_t uExitInfo2 = uErr;
2348 switch (enmTaskSwitch)
2349 {
2350 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2351 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2352 default: break;
2353 }
2354 if (fFlags & IEM_XCPT_FLAGS_ERR)
2355 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2356 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2357 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2358
2359 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2360 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2361 RT_NOREF2(uExitInfo1, uExitInfo2);
2362 }
2363
2364 /*
2365 * Check the current TSS limit. The last data written to the current TSS during the
2366 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2367 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2368 *
2369 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2370 * end up with smaller than "legal" TSS limits.
2371 */
2372 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2373 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2374 if (uCurTSSLimit < uCurTSSLimitMin)
2375 {
2376 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2377 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2378 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2379 }
2380
2381 /*
2382 * Verify that the new TSS can be accessed and map it. Map only the required contents
2383 * and not the entire TSS.
2384 */
2385 void *pvNewTSS;
2386 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2387 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2388 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2389 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2390 * not perform correct translation if this happens. See Intel spec. 7.2.1
2391 * "Task-State Segment". */
2392 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2393 if (rcStrict != VINF_SUCCESS)
2394 {
2395 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2396 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2397 return rcStrict;
2398 }
2399
2400 /*
2401 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2402 */
2403 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2404 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2405 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2406 {
2407 PX86DESC pDescCurTSS;
2408 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2409 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2410 if (rcStrict != VINF_SUCCESS)
2411 {
2412 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2413 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2414 return rcStrict;
2415 }
2416
2417 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2418 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2419 if (rcStrict != VINF_SUCCESS)
2420 {
2421 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2422 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2423 return rcStrict;
2424 }
2425
2426 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2427 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2428 {
2429 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2430 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2431 fEFlags &= ~X86_EFL_NT;
2432 }
2433 }
2434
2435 /*
2436 * Save the CPU state into the current TSS.
2437 */
2438 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2439 if (GCPtrNewTSS == GCPtrCurTSS)
2440 {
2441 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2442 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2443 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2444 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2445 pVCpu->cpum.GstCtx.ldtr.Sel));
2446 }
2447 if (fIsNewTSS386)
2448 {
2449 /*
2450 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2451 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2452 */
2453 void *pvCurTSS32;
2454 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2455 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2456 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2457 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2458 if (rcStrict != VINF_SUCCESS)
2459 {
2460 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2461 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2462 return rcStrict;
2463 }
2464
2465 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2466 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2467 pCurTSS32->eip = uNextEip;
2468 pCurTSS32->eflags = fEFlags;
2469 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2470 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2471 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2472 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2473 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2474 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2475 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2476 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2477 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2478 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2479 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2480 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2481 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2482 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2483
2484 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2485 if (rcStrict != VINF_SUCCESS)
2486 {
2487 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2488 VBOXSTRICTRC_VAL(rcStrict)));
2489 return rcStrict;
2490 }
2491 }
2492 else
2493 {
2494 /*
2495 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2496 */
2497 void *pvCurTSS16;
2498 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2499 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2500 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2501 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2502 if (rcStrict != VINF_SUCCESS)
2503 {
2504 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2505 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2506 return rcStrict;
2507 }
2508
2509 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2510 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2511 pCurTSS16->ip = uNextEip;
2512 pCurTSS16->flags = (uint16_t)fEFlags;
2513 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2514 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2515 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2516 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2517 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2518 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2519 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2520 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2521 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2522 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2523 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2524 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2525
2526 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2527 if (rcStrict != VINF_SUCCESS)
2528 {
2529 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2530 VBOXSTRICTRC_VAL(rcStrict)));
2531 return rcStrict;
2532 }
2533 }
2534
2535 /*
2536 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2537 */
2538 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2539 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2540 {
2541 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2542 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2543 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2544 }
2545
2546 /*
2547 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2548 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2549 */
2550 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2551 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2552 bool fNewDebugTrap;
2553 if (fIsNewTSS386)
2554 {
2555 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2556 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2557 uNewEip = pNewTSS32->eip;
2558 uNewEflags = pNewTSS32->eflags;
2559 uNewEax = pNewTSS32->eax;
2560 uNewEcx = pNewTSS32->ecx;
2561 uNewEdx = pNewTSS32->edx;
2562 uNewEbx = pNewTSS32->ebx;
2563 uNewEsp = pNewTSS32->esp;
2564 uNewEbp = pNewTSS32->ebp;
2565 uNewEsi = pNewTSS32->esi;
2566 uNewEdi = pNewTSS32->edi;
2567 uNewES = pNewTSS32->es;
2568 uNewCS = pNewTSS32->cs;
2569 uNewSS = pNewTSS32->ss;
2570 uNewDS = pNewTSS32->ds;
2571 uNewFS = pNewTSS32->fs;
2572 uNewGS = pNewTSS32->gs;
2573 uNewLdt = pNewTSS32->selLdt;
2574 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2575 }
2576 else
2577 {
2578 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2579 uNewCr3 = 0;
2580 uNewEip = pNewTSS16->ip;
2581 uNewEflags = pNewTSS16->flags;
2582 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2583 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2584 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2585 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2586 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2587 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2588 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2589 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2590 uNewES = pNewTSS16->es;
2591 uNewCS = pNewTSS16->cs;
2592 uNewSS = pNewTSS16->ss;
2593 uNewDS = pNewTSS16->ds;
2594 uNewFS = 0;
2595 uNewGS = 0;
2596 uNewLdt = pNewTSS16->selLdt;
2597 fNewDebugTrap = false;
2598 }
2599
2600 if (GCPtrNewTSS == GCPtrCurTSS)
2601 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2602 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2603
2604 /*
2605 * We're done accessing the new TSS.
2606 */
2607 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2608 if (rcStrict != VINF_SUCCESS)
2609 {
2610 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2611 return rcStrict;
2612 }
2613
2614 /*
2615 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2616 */
2617 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2618 {
2619 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2620 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2621 if (rcStrict != VINF_SUCCESS)
2622 {
2623 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2624 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2625 return rcStrict;
2626 }
2627
2628 /* Check that the descriptor indicates the new TSS is available (not busy). */
2629 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2630 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2631 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2632
2633 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2634 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2635 if (rcStrict != VINF_SUCCESS)
2636 {
2637 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2638 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2639 return rcStrict;
2640 }
2641 }
2642
2643 /*
2644 * From this point on, we're technically in the new task. Exceptions are deferred
2645 * until the task switch completes, but are raised before executing any instruction in the new task.
2646 */
2647 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2648 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2649 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2650 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2651 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2652 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2653 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2654
2655 /* Set the busy bit in TR. */
2656 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2657
2658 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2659 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2660 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2661 {
2662 uNewEflags |= X86_EFL_NT;
2663 }
2664
2665 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2666 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2667 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2668
2669 pVCpu->cpum.GstCtx.eip = uNewEip;
2670 pVCpu->cpum.GstCtx.eax = uNewEax;
2671 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2672 pVCpu->cpum.GstCtx.edx = uNewEdx;
2673 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2674 pVCpu->cpum.GstCtx.esp = uNewEsp;
2675 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2676 pVCpu->cpum.GstCtx.esi = uNewEsi;
2677 pVCpu->cpum.GstCtx.edi = uNewEdi;
2678
2679 uNewEflags &= X86_EFL_LIVE_MASK;
2680 uNewEflags |= X86_EFL_RA1_MASK;
2681 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2682
2683 /*
2684 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2685 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2686 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2687 */
2688 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2689 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2690
2691 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2692 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2693
2694 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2695 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2696
2697 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2698 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2699
2700 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2701 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2702
2703 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2704 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2705 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2706
2707 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2708 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2709 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2710 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2711
2712 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2713 {
2714 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2715 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2716 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2717 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2718 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2719 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2720 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2721 }
2722
2723 /*
2724 * Switch CR3 for the new task.
2725 */
2726 if ( fIsNewTSS386
2727 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2728 {
2729 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2730 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2731 AssertRCSuccessReturn(rc, rc);
2732
2733 /* Inform PGM. */
2734 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2735 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2736 AssertRCReturn(rc, rc);
2737 /* ignore informational status codes */
2738
2739 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2740 }
2741
2742 /*
2743 * Switch LDTR for the new task.
2744 */
2745 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2746 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2747 else
2748 {
2749 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2750
2751 IEMSELDESC DescNewLdt;
2752 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2753 if (rcStrict != VINF_SUCCESS)
2754 {
2755 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2756 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2757 return rcStrict;
2758 }
2759 if ( !DescNewLdt.Legacy.Gen.u1Present
2760 || DescNewLdt.Legacy.Gen.u1DescType
2761 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2762 {
2763 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2764 uNewLdt, DescNewLdt.Legacy.u));
2765 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2766 }
2767
2768 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2769 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2770 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2771 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2772 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2773 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2774 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2775 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2776 }
2777
2778 IEMSELDESC DescSS;
2779 if (IEM_IS_V86_MODE(pVCpu))
2780 {
2781 IEM_SET_CPL(pVCpu, 3);
2782 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2783 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2784 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2785 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2786 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2787 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2788
2789 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2790 DescSS.Legacy.u = 0;
2791 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2792 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2793 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2794 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2795 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2796 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2797 DescSS.Legacy.Gen.u2Dpl = 3;
2798 }
2799 else
2800 {
2801 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2802
2803 /*
2804 * Load the stack segment for the new task.
2805 */
2806 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2807 {
2808 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2809 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2810 }
2811
2812 /* Fetch the descriptor. */
2813 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2814 if (rcStrict != VINF_SUCCESS)
2815 {
2816 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2817 VBOXSTRICTRC_VAL(rcStrict)));
2818 return rcStrict;
2819 }
2820
2821 /* SS must be a data segment and writable. */
2822 if ( !DescSS.Legacy.Gen.u1DescType
2823 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2824 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2825 {
2826 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2827 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2828 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2829 }
2830
2831 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2832 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2833 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2834 {
2835 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2836 uNewCpl));
2837 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2838 }
2839
2840 /* Is it there? */
2841 if (!DescSS.Legacy.Gen.u1Present)
2842 {
2843 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2844 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2845 }
2846
2847 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2848 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2849
2850 /* Set the accessed bit before committing the result into SS. */
2851 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2852 {
2853 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2854 if (rcStrict != VINF_SUCCESS)
2855 return rcStrict;
2856 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2857 }
2858
2859 /* Commit SS. */
2860 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2861 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2862 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2863 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2864 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2865 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2866 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2867
2868 /* CPL has changed, update IEM before loading rest of segments. */
2869 IEM_SET_CPL(pVCpu, uNewCpl);
2870
2871 /*
2872 * Load the data segments for the new task.
2873 */
2874 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2875 if (rcStrict != VINF_SUCCESS)
2876 return rcStrict;
2877 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2878 if (rcStrict != VINF_SUCCESS)
2879 return rcStrict;
2880 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2881 if (rcStrict != VINF_SUCCESS)
2882 return rcStrict;
2883 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2884 if (rcStrict != VINF_SUCCESS)
2885 return rcStrict;
2886
2887 /*
2888 * Load the code segment for the new task.
2889 */
2890 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2891 {
2892 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2893 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2894 }
2895
2896 /* Fetch the descriptor. */
2897 IEMSELDESC DescCS;
2898 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2899 if (rcStrict != VINF_SUCCESS)
2900 {
2901 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2902 return rcStrict;
2903 }
2904
2905 /* CS must be a code segment. */
2906 if ( !DescCS.Legacy.Gen.u1DescType
2907 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2908 {
2909 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2910 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2911 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2912 }
2913
2914 /* For conforming CS, DPL must be less than or equal to the RPL. */
2915 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2916 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2917 {
2918 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2919 DescCS.Legacy.Gen.u2Dpl));
2920 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2921 }
2922
2923 /* For non-conforming CS, DPL must match RPL. */
2924 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2925 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2926 {
2927 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2928 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2929 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2930 }
2931
2932 /* Is it there? */
2933 if (!DescCS.Legacy.Gen.u1Present)
2934 {
2935 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2936 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2937 }
2938
2939 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2940 u64Base = X86DESC_BASE(&DescCS.Legacy);
2941
2942 /* Set the accessed bit before committing the result into CS. */
2943 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2944 {
2945 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2946 if (rcStrict != VINF_SUCCESS)
2947 return rcStrict;
2948 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2949 }
2950
2951 /* Commit CS. */
2952 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2953 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2954 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2955 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2956 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2957 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2958 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2959 }
2960
2961 /* Make sure the CPU mode is correct. */
2962 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
2963 if (fExecNew != pVCpu->iem.s.fExec)
2964 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
2965 pVCpu->iem.s.fExec = fExecNew;
2966
2967 /** @todo Debug trap. */
2968 if (fIsNewTSS386 && fNewDebugTrap)
2969 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2970
2971 /*
2972 * Construct the error code masks based on what caused this task switch.
2973 * See Intel Instruction reference for INT.
2974 */
2975 uint16_t uExt;
2976 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2977 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2978 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2979 uExt = 1;
2980 else
2981 uExt = 0;
2982
2983 /*
2984 * Push any error code on to the new stack.
2985 */
2986 if (fFlags & IEM_XCPT_FLAGS_ERR)
2987 {
2988 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
2989 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2990 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
2991
2992 /* Check that there is sufficient space on the stack. */
2993 /** @todo Factor out segment limit checking for normal/expand down segments
2994 * into a separate function. */
2995 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
2996 {
2997 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
2998 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
2999 {
3000 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3001 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3002 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3003 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3004 }
3005 }
3006 else
3007 {
3008 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3009 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3010 {
3011 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3012 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3013 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3014 }
3015 }
3016
3017
3018 if (fIsNewTSS386)
3019 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3020 else
3021 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3022 if (rcStrict != VINF_SUCCESS)
3023 {
3024 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3025 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3026 return rcStrict;
3027 }
3028 }
3029
3030 /* Check the new EIP against the new CS limit. */
3031 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3032 {
3033 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3034 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3035 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3036 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3037 }
3038
3039 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3040 pVCpu->cpum.GstCtx.ss.Sel));
3041 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3042}
3043
3044
3045/**
3046 * Implements exceptions and interrupts for protected mode.
3047 *
3048 * @returns VBox strict status code.
3049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3050 * @param cbInstr The number of bytes to offset rIP by in the return
3051 * address.
3052 * @param u8Vector The interrupt / exception vector number.
3053 * @param fFlags The flags.
3054 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3055 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3056 */
3057static VBOXSTRICTRC
3058iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3059 uint8_t cbInstr,
3060 uint8_t u8Vector,
3061 uint32_t fFlags,
3062 uint16_t uErr,
3063 uint64_t uCr2) RT_NOEXCEPT
3064{
3065 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3066
3067 /*
3068 * Read the IDT entry.
3069 */
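    /* Protected-mode IDT entries are 8 bytes each, so the IDTR limit must cover
       the last byte of the selected descriptor. */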
3070 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3071 {
3072 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3073 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3074 }
3075 X86DESC Idte;
3076 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3077 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3078 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3079 {
3080 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3081 return rcStrict;
3082 }
3083 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3084 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3085 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3086 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3087
3088 /*
3089 * Check the descriptor type, DPL and such.
3090 * ASSUMES this is done in the same order as described for call-gate calls.
3091 */
3092 if (Idte.Gate.u1DescType)
3093 {
3094 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3095 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3096 }
3097 bool fTaskGate = false;
3098 uint8_t f32BitGate = true;
3099 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
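    /* The gate type determines the width of the stack frame we push (16 vs 32 bit)
       and whether EFLAGS.IF is cleared as well (interrupt gates clear it, trap
       gates leave it alone). */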
3100 switch (Idte.Gate.u4Type)
3101 {
3102 case X86_SEL_TYPE_SYS_UNDEFINED:
3103 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3104 case X86_SEL_TYPE_SYS_LDT:
3105 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3106 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3107 case X86_SEL_TYPE_SYS_UNDEFINED2:
3108 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3109 case X86_SEL_TYPE_SYS_UNDEFINED3:
3110 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3111 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3112 case X86_SEL_TYPE_SYS_UNDEFINED4:
3113 {
3114 /** @todo check what actually happens when the type is wrong...
3115 * esp. call gates. */
3116 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3117 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3118 }
3119
3120 case X86_SEL_TYPE_SYS_286_INT_GATE:
3121 f32BitGate = false;
3122 RT_FALL_THRU();
3123 case X86_SEL_TYPE_SYS_386_INT_GATE:
3124 fEflToClear |= X86_EFL_IF;
3125 break;
3126
3127 case X86_SEL_TYPE_SYS_TASK_GATE:
3128 fTaskGate = true;
3129#ifndef IEM_IMPLEMENTS_TASKSWITCH
3130 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3131#endif
3132 break;
3133
3134 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3135 f32BitGate = false; RT_FALL_THRU();
3136 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3137 break;
3138
3139 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3140 }
3141
3142 /* Check DPL against CPL if applicable. */
3143 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3144 {
3145 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3146 {
3147 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3148 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3149 }
3150 }
3151
3152 /* Is it there? */
3153 if (!Idte.Gate.u1Present)
3154 {
3155 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3156 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3157 }
3158
3159 /* Is it a task-gate? */
3160 if (fTaskGate)
3161 {
3162 /*
3163 * Construct the error code masks based on what caused this task switch.
3164 * See Intel Instruction reference for INT.
3165 */
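        /* EXT is set unless this is a software interrupt (INT n / INT3 / INTO);
           ICEBP (INT1) is treated as an external event. */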
3166 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3167 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3168 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3169 RTSEL SelTSS = Idte.Gate.u16Sel;
3170
3171 /*
3172 * Fetch the TSS descriptor in the GDT.
3173 */
3174 IEMSELDESC DescTSS;
3175 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3176 if (rcStrict != VINF_SUCCESS)
3177 {
3178 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3179 VBOXSTRICTRC_VAL(rcStrict)));
3180 return rcStrict;
3181 }
3182
3183 /* The TSS descriptor must be a system segment and be available (not busy). */
3184 if ( DescTSS.Legacy.Gen.u1DescType
3185 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3186 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3187 {
3188 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3189 u8Vector, SelTSS, DescTSS.Legacy.au64));
3190 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3191 }
3192
3193 /* The TSS must be present. */
3194 if (!DescTSS.Legacy.Gen.u1Present)
3195 {
3196 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3197 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3198 }
3199
3200 /* Do the actual task switch. */
3201 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3202 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3203 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3204 }
3205
3206 /* A null CS is bad. */
3207 RTSEL NewCS = Idte.Gate.u16Sel;
3208 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3209 {
3210 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3211 return iemRaiseGeneralProtectionFault0(pVCpu);
3212 }
3213
3214 /* Fetch the descriptor for the new CS. */
3215 IEMSELDESC DescCS;
3216 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3217 if (rcStrict != VINF_SUCCESS)
3218 {
3219 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3220 return rcStrict;
3221 }
3222
3223 /* Must be a code segment. */
3224 if (!DescCS.Legacy.Gen.u1DescType)
3225 {
3226 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3227 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3228 }
3229 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3230 {
3231 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3232 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3233 }
3234
3235 /* Don't allow lowering the privilege level. */
3236 /** @todo Does the lowering of privileges apply to software interrupts
3237 * only? This has bearings on the more-privileged or
3238 * same-privilege stack behavior further down. A testcase would
3239 * be nice. */
3240 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3241 {
3242 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3243 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3244 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3245 }
3246
3247 /* Make sure the selector is present. */
3248 if (!DescCS.Legacy.Gen.u1Present)
3249 {
3250 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3251 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3252 }
3253
3254 /* Check the new EIP against the new CS limit. */
3255 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3256 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3257 ? Idte.Gate.u16OffsetLow
3258 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3259 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3260 if (uNewEip > cbLimitCS)
3261 {
3262 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3263 u8Vector, uNewEip, cbLimitCS, NewCS));
3264 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3265 }
3266 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3267
3268 /* Calc the flag image to push. */
3269 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3270 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3271 fEfl &= ~X86_EFL_RF;
3272 else
3273 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3274
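    /* A conforming code segment keeps the current CPL; a non-conforming one runs
       the handler at its own DPL (already verified above to be <= CPL). */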
3275 /* From V8086 mode only go to CPL 0. */
3276 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3277 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3278 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3279 {
3280 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3281 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3282 }
3283
3284 /*
3285 * If the privilege level changes, we need to get a new stack from the TSS.
3286 * This in turn means validating the new SS and ESP...
3287 */
3288 if (uNewCpl != IEM_GET_CPL(pVCpu))
3289 {
3290 RTSEL NewSS;
3291 uint32_t uNewEsp;
3292 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3293 if (rcStrict != VINF_SUCCESS)
3294 return rcStrict;
3295
3296 IEMSELDESC DescSS;
3297 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3298 if (rcStrict != VINF_SUCCESS)
3299 return rcStrict;
3300 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3301 if (!DescSS.Legacy.Gen.u1DefBig)
3302 {
3303 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3304 uNewEsp = (uint16_t)uNewEsp;
3305 }
3306
3307 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3308
3309 /* Check that there is sufficient space for the stack frame. */
3310 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
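            /* Frame entries: EIP, CS, EFLAGS, old ESP, old SS (5), plus one more for
               the error code when present, plus ES/DS/FS/GS (4 more) when interrupting
               V8086 code; the shift by f32BitGate doubles the entry size for 32-bit
               gates. */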
3311 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3312 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3313 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3314
3315 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3316 {
3317 if ( uNewEsp - 1 > cbLimitSS
3318 || uNewEsp < cbStackFrame)
3319 {
3320 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3321 u8Vector, NewSS, uNewEsp, cbStackFrame));
3322 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3323 }
3324 }
3325 else
3326 {
3327 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3328 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3329 {
3330 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3331 u8Vector, NewSS, uNewEsp, cbStackFrame));
3332 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3333 }
3334 }
3335
3336 /*
3337 * Start making changes.
3338 */
3339
3340 /* Set the new CPL so that stack accesses use it. */
3341 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3342 IEM_SET_CPL(pVCpu, uNewCpl);
3343
3344 /* Create the stack frame. */
3345 RTPTRUNION uStackFrame;
3346 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3347 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3348 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3349 if (rcStrict != VINF_SUCCESS)
3350 return rcStrict;
3351 void * const pvStackFrame = uStackFrame.pv;
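            /* The frame is written through a direct guest-memory mapping rather than
               individual pushes.  When interrupting V8086 code, ES/DS/FS/GS are stored
               above the SS:ESP entries and the raw (RPL-unadjusted) CS value is used. */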
3352 if (f32BitGate)
3353 {
3354 if (fFlags & IEM_XCPT_FLAGS_ERR)
3355 *uStackFrame.pu32++ = uErr;
3356 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3357 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3358 uStackFrame.pu32[2] = fEfl;
3359 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3360 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3361 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3362 if (fEfl & X86_EFL_VM)
3363 {
3364 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3365 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3366 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3367 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3368 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3369 }
3370 }
3371 else
3372 {
3373 if (fFlags & IEM_XCPT_FLAGS_ERR)
3374 *uStackFrame.pu16++ = uErr;
3375 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3376 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3377 uStackFrame.pu16[2] = fEfl;
3378 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3379 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3380 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3381 if (fEfl & X86_EFL_VM)
3382 {
3383 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3384 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3385 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3386 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3387 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3388 }
3389 }
3390 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3391 if (rcStrict != VINF_SUCCESS)
3392 return rcStrict;
3393
3394 /* Mark the selectors 'accessed' (hope this is the correct time). */
3395 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3396 * after pushing the stack frame? (Write protect the GDT + stack to
3397 * find out.) */
3398 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3399 {
3400 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3401 if (rcStrict != VINF_SUCCESS)
3402 return rcStrict;
3403 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3404 }
3405
3406 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3407 {
3408 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3409 if (rcStrict != VINF_SUCCESS)
3410 return rcStrict;
3411 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3412 }
3413
3414 /*
3415 * Start committing the register changes (joins with the DPL=CPL branch).
3416 */
3417 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3418 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3419 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3420 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3421 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3422 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3423 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3424 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3425 * SP is loaded).
3426 * Need to check the other combinations too:
3427 * - 16-bit TSS, 32-bit handler
3428 * - 32-bit TSS, 16-bit handler */
3429 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3430 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3431 else
3432 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3433
3434 if (fEfl & X86_EFL_VM)
3435 {
3436 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3437 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3438 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3439 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3440 }
3441 }
3442 /*
3443 * Same privilege, no stack change and smaller stack frame.
3444 */
3445 else
3446 {
3447 uint64_t uNewRsp;
3448 RTPTRUNION uStackFrame;
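            /* Without a privilege change only EIP, CS and EFLAGS (plus the optional
               error code) are pushed; the shift by f32BitGate doubles the entry size
               for 32-bit gates. */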
3449 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3450 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3451 if (rcStrict != VINF_SUCCESS)
3452 return rcStrict;
3453 void * const pvStackFrame = uStackFrame.pv;
3454
3455 if (f32BitGate)
3456 {
3457 if (fFlags & IEM_XCPT_FLAGS_ERR)
3458 *uStackFrame.pu32++ = uErr;
3459 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3460 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3461 uStackFrame.pu32[2] = fEfl;
3462 }
3463 else
3464 {
3465 if (fFlags & IEM_XCPT_FLAGS_ERR)
3466 *uStackFrame.pu16++ = uErr;
3467 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3468 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3469 uStackFrame.pu16[2] = fEfl;
3470 }
3471 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3472 if (rcStrict != VINF_SUCCESS)
3473 return rcStrict;
3474
3475 /* Mark the CS selector as 'accessed'. */
3476 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3477 {
3478 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3479 if (rcStrict != VINF_SUCCESS)
3480 return rcStrict;
3481 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3482 }
3483
3484 /*
3485 * Start committing the register changes (joins with the other branch).
3486 */
3487 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3488 }
3489
3490 /* ... register committing continues. */
3491 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3492 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3493 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3494 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3495 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3496 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3497
3498 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3499 fEfl &= ~fEflToClear;
3500 IEMMISC_SET_EFL(pVCpu, fEfl);
3501
3502 if (fFlags & IEM_XCPT_FLAGS_CR2)
3503 pVCpu->cpum.GstCtx.cr2 = uCr2;
3504
3505 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3506 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3507
3508 /* Make sure the execution flags are correct. */
3509 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3510 if (fExecNew != pVCpu->iem.s.fExec)
3511 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3512 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3513 pVCpu->iem.s.fExec = fExecNew;
3514 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3515
3516 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3517}
3518
3519
3520/**
3521 * Implements exceptions and interrupts for long mode.
3522 *
3523 * @returns VBox strict status code.
3524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3525 * @param cbInstr The number of bytes to offset rIP by in the return
3526 * address.
3527 * @param u8Vector The interrupt / exception vector number.
3528 * @param fFlags The flags.
3529 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3530 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3531 */
3532static VBOXSTRICTRC
3533iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3534 uint8_t cbInstr,
3535 uint8_t u8Vector,
3536 uint32_t fFlags,
3537 uint16_t uErr,
3538 uint64_t uCr2) RT_NOEXCEPT
3539{
3540 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3541
3542 /*
3543 * Read the IDT entry.
3544 */
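    /* IDT entries are 16 bytes each in long mode, hence the shift by 4. */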
3545 uint16_t offIdt = (uint16_t)u8Vector << 4;
3546 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3547 {
3548 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3549 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3550 }
3551 X86DESC64 Idte;
3552#ifdef _MSC_VER /* Shut up silly compiler warning. */
3553 Idte.au64[0] = 0;
3554 Idte.au64[1] = 0;
3555#endif
3556 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3557 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3558 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3559 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3560 {
3561 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3562 return rcStrict;
3563 }
3564 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3565 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3566 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3567
3568 /*
3569 * Check the descriptor type, DPL and such.
3570 * ASSUMES this is done in the same order as described for call-gate calls.
3571 */
3572 if (Idte.Gate.u1DescType)
3573 {
3574 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3575 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3576 }
3577 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3578 switch (Idte.Gate.u4Type)
3579 {
3580 case AMD64_SEL_TYPE_SYS_INT_GATE:
3581 fEflToClear |= X86_EFL_IF;
3582 break;
3583 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3584 break;
3585
3586 default:
3587 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3588 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3589 }
3590
3591 /* Check DPL against CPL if applicable. */
3592 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3593 {
3594 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3595 {
3596 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3597 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3598 }
3599 }
3600
3601 /* Is it there? */
3602 if (!Idte.Gate.u1Present)
3603 {
3604 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3605 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3606 }
3607
3608 /* A null CS is bad. */
3609 RTSEL NewCS = Idte.Gate.u16Sel;
3610 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3611 {
3612 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3613 return iemRaiseGeneralProtectionFault0(pVCpu);
3614 }
3615
3616 /* Fetch the descriptor for the new CS. */
3617 IEMSELDESC DescCS;
3618 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3619 if (rcStrict != VINF_SUCCESS)
3620 {
3621 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3622 return rcStrict;
3623 }
3624
3625 /* Must be a 64-bit code segment. */
3626 if (!DescCS.Long.Gen.u1DescType)
3627 {
3628 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3629 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3630 }
3631 if ( !DescCS.Long.Gen.u1Long
3632 || DescCS.Long.Gen.u1DefBig
3633 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3634 {
3635 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3636 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3637 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3638 }
3639
3640 /* Don't allow lowering the privilege level. For non-conforming CS
3641 selectors, the CS.DPL sets the privilege level the trap/interrupt
3642 handler runs at. For conforming CS selectors, the CPL remains
3643 unchanged, but the CS.DPL must be <= CPL. */
3644 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3645 * when CPU in Ring-0. Result \#GP? */
3646 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3647 {
3648 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3649 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3650 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3651 }
3652
3653
3654 /* Make sure the selector is present. */
3655 if (!DescCS.Legacy.Gen.u1Present)
3656 {
3657 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3658 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3659 }
3660
3661 /* Check that the new RIP is canonical. */
3662 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3663 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3664 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3665 if (!IEM_IS_CANONICAL(uNewRip))
3666 {
3667 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3668 return iemRaiseGeneralProtectionFault0(pVCpu);
3669 }
3670
3671 /*
3672 * If the privilege level changes or if the IST isn't zero, we need to get
3673 * a new stack from the TSS.
3674 */
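    /* A non-zero IST index selects one of the seven Interrupt Stack Table slots in
       the 64-bit TSS and forces a stack switch even without a CPL change. */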
3675 uint64_t uNewRsp;
3676 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3677 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3678 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3679 || Idte.Gate.u3IST != 0)
3680 {
3681 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3682 if (rcStrict != VINF_SUCCESS)
3683 return rcStrict;
3684 }
3685 else
3686 uNewRsp = pVCpu->cpum.GstCtx.rsp;
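    /* In 64-bit mode the CPU aligns the new stack pointer on a 16-byte boundary
       before pushing the interrupt frame. */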
3687 uNewRsp &= ~(uint64_t)0xf;
3688
3689 /*
3690 * Calc the flag image to push.
3691 */
3692 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3693 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3694 fEfl &= ~X86_EFL_RF;
3695 else
3696 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3697
3698 /*
3699 * Start making changes.
3700 */
3701 /* Set the new CPL so that stack accesses use it. */
3702 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3703 IEM_SET_CPL(pVCpu, uNewCpl);
3704/** @todo Setting CPL this early seems wrong as it would affect any errors we
3705 * raise accessing the stack and (?) GDT/LDT... */
3706
3707 /* Create the stack frame. */
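    /* The 64-bit frame always holds RIP, CS, RFLAGS, RSP and SS (5 quadwords),
       plus the error code when one is supplied. */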
3708 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3709 RTPTRUNION uStackFrame;
3710 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3711 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3712 if (rcStrict != VINF_SUCCESS)
3713 return rcStrict;
3714 void * const pvStackFrame = uStackFrame.pv;
3715
3716 if (fFlags & IEM_XCPT_FLAGS_ERR)
3717 *uStackFrame.pu64++ = uErr;
3718 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3719 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3720 uStackFrame.pu64[2] = fEfl;
3721 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3722 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3723 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3724 if (rcStrict != VINF_SUCCESS)
3725 return rcStrict;
3726
3727 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
3728 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3729 * after pushing the stack frame? (Write protect the GDT + stack to
3730 * find out.) */
3731 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3732 {
3733 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3734 if (rcStrict != VINF_SUCCESS)
3735 return rcStrict;
3736 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3737 }
3738
3739 /*
3740 * Start committing the register changes.
3741 */
3742 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3743 * hidden registers when interrupting 32-bit or 16-bit code! */
3744 if (uNewCpl != uOldCpl)
3745 {
3746 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3747 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3748 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3749 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3750 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3751 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3752 }
3753 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3754 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3755 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3756 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3757 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3758 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3759 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3760 pVCpu->cpum.GstCtx.rip = uNewRip;
3761
3762 fEfl &= ~fEflToClear;
3763 IEMMISC_SET_EFL(pVCpu, fEfl);
3764
3765 if (fFlags & IEM_XCPT_FLAGS_CR2)
3766 pVCpu->cpum.GstCtx.cr2 = uCr2;
3767
3768 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3769 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3770
3771 iemRecalcExecModeAndCplFlags(pVCpu);
3772
3773 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3774}
3775
3776
3777/**
3778 * Implements exceptions and interrupts.
3779 *
3780 * All exceptions and interrupts go through this function!
3781 *
3782 * @returns VBox strict status code.
3783 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3784 * @param cbInstr The number of bytes to offset rIP by in the return
3785 * address.
3786 * @param u8Vector The interrupt / exception vector number.
3787 * @param fFlags The flags.
3788 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3789 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3790 */
3791VBOXSTRICTRC
3792iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3793 uint8_t cbInstr,
3794 uint8_t u8Vector,
3795 uint32_t fFlags,
3796 uint16_t uErr,
3797 uint64_t uCr2) RT_NOEXCEPT
3798{
3799 /*
3800 * Get all the state that we might need here.
3801 */
3802 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3803 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3804
3805#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3806 /*
3807 * Flush prefetch buffer
3808 */
3809 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3810#endif
3811
3812 /*
3813 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3814 */
3815 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3816 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3817 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3818 | IEM_XCPT_FLAGS_BP_INSTR
3819 | IEM_XCPT_FLAGS_ICEBP_INSTR
3820 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3821 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3822 {
3823 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3824 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3825 u8Vector = X86_XCPT_GP;
3826 uErr = 0;
3827 }
3828#ifdef DBGFTRACE_ENABLED
3829 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3830 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3831 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3832#endif
3833
3834 /*
3835 * Evaluate whether NMI blocking should be in effect.
3836 * Normally, NMI blocking is in effect whenever we inject an NMI.
3837 */
3838 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3839 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3840
3841#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3842 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3843 {
3844 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3845 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3846 return rcStrict0;
3847
3848 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3849 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3850 {
3851 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3852 fBlockNmi = false;
3853 }
3854 }
3855#endif
3856
3857#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3858 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3859 {
3860 /*
3861 * If the event is being injected as part of VMRUN, it isn't subject to event
3862 * intercepts in the nested-guest. However, secondary exceptions that occur
3863 * during injection of any event -are- subject to exception intercepts.
3864 *
3865 * See AMD spec. 15.20 "Event Injection".
3866 */
3867 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3868 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3869 else
3870 {
3871 /*
3872 * Check and handle if the event being raised is intercepted.
3873 */
3874 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3875 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3876 return rcStrict0;
3877 }
3878 }
3879#endif
3880
3881 /*
3882 * Set NMI blocking if necessary.
3883 */
3884 if (fBlockNmi)
3885 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3886
3887 /*
3888 * Do recursion accounting.
3889 */
3890 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3891 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3892 if (pVCpu->iem.s.cXcptRecursions == 0)
3893 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3894 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3895 else
3896 {
3897 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3898 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3899 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3900
3901 if (pVCpu->iem.s.cXcptRecursions >= 4)
3902 {
3903#ifdef DEBUG_bird
3904 AssertFailed();
3905#endif
3906 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3907 }
3908
3909 /*
3910 * Evaluate the sequence of recurring events.
3911 */
3912 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3913 NULL /* pXcptRaiseInfo */);
3914 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3915 { /* likely */ }
3916 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3917 {
3918 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3919 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3920 u8Vector = X86_XCPT_DF;
3921 uErr = 0;
3922#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3923 /* VMX nested-guest #DF intercept needs to be checked here. */
3924 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3925 {
3926 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3927 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3928 return rcStrict0;
3929 }
3930#endif
3931 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3932 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3933 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3934 }
3935 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3936 {
3937 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3938 return iemInitiateCpuShutdown(pVCpu);
3939 }
3940 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3941 {
3942 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3943 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3944 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3945 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3946 return VERR_EM_GUEST_CPU_HANG;
3947 }
3948 else
3949 {
3950 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3951 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3952 return VERR_IEM_IPE_9;
3953 }
3954
3955 /*
3956 * The 'EXT' bit is set when an exception occurs during delivery of an external
3957 * event (such as an interrupt or an earlier exception)[1]. The privileged software
3958 * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by software
3959 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
3960 *
3961 * [1] - Intel spec. 6.13 "Error Code"
3962 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3963 * [3] - Intel Instruction reference for INT n.
3964 */
3965 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3966 && (fFlags & IEM_XCPT_FLAGS_ERR)
3967 && u8Vector != X86_XCPT_PF
3968 && u8Vector != X86_XCPT_DF)
3969 {
3970 uErr |= X86_TRAP_ERR_EXTERNAL;
3971 }
3972 }
3973
3974 pVCpu->iem.s.cXcptRecursions++;
3975 pVCpu->iem.s.uCurXcpt = u8Vector;
3976 pVCpu->iem.s.fCurXcpt = fFlags;
3977 pVCpu->iem.s.uCurXcptErr = uErr;
3978 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3979
3980 /*
3981 * Extensive logging.
3982 */
3983#if defined(LOG_ENABLED) && defined(IN_RING3)
3984 if (LogIs3Enabled())
3985 {
3986 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
3987 PVM pVM = pVCpu->CTX_SUFF(pVM);
3988 char szRegs[4096];
3989 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3990 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3991 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3992 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3993 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3994 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3995 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3996 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3997 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3998 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3999 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4000 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4001 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4002 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4003 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4004 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4005 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4006 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4007 " efer=%016VR{efer}\n"
4008 " pat=%016VR{pat}\n"
4009 " sf_mask=%016VR{sf_mask}\n"
4010 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4011 " lstar=%016VR{lstar}\n"
4012 " star=%016VR{star} cstar=%016VR{cstar}\n"
4013 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4014 );
4015
4016 char szInstr[256];
4017 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4018 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4019 szInstr, sizeof(szInstr), NULL);
4020 Log3(("%s%s\n", szRegs, szInstr));
4021 }
4022#endif /* LOG_ENABLED */
4023
4024 /*
4025 * Stats.
4026 */
4027 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4028 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4029 else if (u8Vector <= X86_XCPT_LAST)
4030 {
4031 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4032 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4033 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
4034 }
4035
4036 /*
4037 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4038 * to ensure that a stale TLB or paging cache entry will only cause one
4039 * spurious #PF.
4040 */
4041 if ( u8Vector == X86_XCPT_PF
4042 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4043 IEMTlbInvalidatePage(pVCpu, uCr2);
4044
4045 /*
4046 * Call the mode specific worker function.
4047 */
4048 VBOXSTRICTRC rcStrict;
4049 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4050 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4051 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4052 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4053 else
4054 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4055
4056 /* Flush the prefetch buffer. */
4057#ifdef IEM_WITH_CODE_TLB
4058 pVCpu->iem.s.pbInstrBuf = NULL;
4059#else
4060 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4061#endif
4062
4063 /*
4064 * Unwind.
4065 */
4066 pVCpu->iem.s.cXcptRecursions--;
4067 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4068 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4069 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4070 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4071 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4072 return rcStrict;
4073}
4074
4075#ifdef IEM_WITH_SETJMP
4076/**
4077 * See iemRaiseXcptOrInt. Will not return.
4078 */
4079DECL_NO_RETURN(void)
4080iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4081 uint8_t cbInstr,
4082 uint8_t u8Vector,
4083 uint32_t fFlags,
4084 uint16_t uErr,
4085 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4086{
4087 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4088 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4089}
4090#endif
4091
4092
4093/** \#DE - 00. */
4094VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4095{
4096 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4097}
4098
4099
4100/** \#DB - 01.
4101 * @note This automatically clears DR7.GD. */
4102VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4103{
4104 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4105 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4106 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4107}
4108
4109
4110/** \#BR - 05. */
4111VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4112{
4113 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4114}
4115
4116
4117/** \#UD - 06. */
4118VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4119{
4120 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4121}
4122
4123
4124/** \#NM - 07. */
4125VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4126{
4127 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4128}
4129
4130
4131/** \#TS(err) - 0a. */
4132VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4133{
4134 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4135}
4136
4137
4138/** \#TS(tr) - 0a. */
4139VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4140{
4141 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4142 pVCpu->cpum.GstCtx.tr.Sel, 0);
4143}
4144
4145
4146/** \#TS(0) - 0a. */
4147VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4148{
4149 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4150 0, 0);
4151}
4152
4153
4154/** \#TS(err) - 0a. */
4155VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4156{
4157 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4158 uSel & X86_SEL_MASK_OFF_RPL, 0);
4159}
4160
4161
4162/** \#NP(err) - 0b. */
4163VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4164{
4165 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4166}
4167
4168
4169/** \#NP(sel) - 0b. */
4170VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4171{
4172 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4173 uSel & ~X86_SEL_RPL, 0);
4174}
4175
4176
4177/** \#SS(seg) - 0c. */
4178VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4179{
4180 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4181 uSel & ~X86_SEL_RPL, 0);
4182}
4183
4184
4185/** \#SS(err) - 0c. */
4186VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4187{
4188 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4189}
4190
4191
4192/** \#GP(n) - 0d. */
4193VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4194{
4195 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4196}
4197
4198
4199/** \#GP(0) - 0d. */
4200VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4201{
4202 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4203}
4204
4205#ifdef IEM_WITH_SETJMP
4206/** \#GP(0) - 0d. */
4207DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4208{
4209 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4210}
4211#endif
4212
4213
4214/** \#GP(sel) - 0d. */
4215VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4216{
4217 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4218 Sel & ~X86_SEL_RPL, 0);
4219}
4220
4221
4222/** \#GP(0) - 0d. */
4223VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4224{
4225 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4226}
4227
4228
4229/** \#GP(sel) - 0d. */
4230VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4231{
4232 NOREF(iSegReg); NOREF(fAccess);
4233 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4234 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4235}
4236
4237#ifdef IEM_WITH_SETJMP
4238/** \#GP(sel) - 0d, longjmp. */
4239DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4240{
4241 NOREF(iSegReg); NOREF(fAccess);
4242 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4243 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4244}
4245#endif
4246
4247/** \#GP(sel) - 0d. */
4248VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4249{
4250 NOREF(Sel);
4251 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4252}
4253
4254#ifdef IEM_WITH_SETJMP
4255/** \#GP(sel) - 0d, longjmp. */
4256DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4257{
4258 NOREF(Sel);
4259 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4260}
4261#endif
4262
4263
4264/** \#GP(sel) - 0d. */
4265VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4266{
4267 NOREF(iSegReg); NOREF(fAccess);
4268 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4269}
4270
4271#ifdef IEM_WITH_SETJMP
4272/** \#GP(sel) - 0d, longjmp. */
4273DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4274{
4275 NOREF(iSegReg); NOREF(fAccess);
4276 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4277}
4278#endif
4279
4280
4281/** \#PF(n) - 0e. */
4282VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4283{
4284 uint16_t uErr;
4285 switch (rc)
4286 {
4287 case VERR_PAGE_NOT_PRESENT:
4288 case VERR_PAGE_TABLE_NOT_PRESENT:
4289 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4290 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4291 uErr = 0;
4292 break;
4293
4294 default:
4295 AssertMsgFailed(("%Rrc\n", rc));
4296 RT_FALL_THRU();
4297 case VERR_ACCESS_DENIED:
4298 uErr = X86_TRAP_PF_P;
4299 break;
4300
4301 /** @todo reserved */
4302 }
4303
4304 if (IEM_GET_CPL(pVCpu) == 3)
4305 uErr |= X86_TRAP_PF_US;
4306
4307 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4308 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4309 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4310 uErr |= X86_TRAP_PF_ID;
4311
4312#if 0 /* This is so much non-sense, really. Why was it done like that? */
4313 /* Note! RW access callers reporting a WRITE protection fault, will clear
4314 the READ flag before calling. So, read-modify-write accesses (RW)
4315 can safely be reported as READ faults. */
4316 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4317 uErr |= X86_TRAP_PF_RW;
4318#else
4319 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4320 {
4321 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4322 /// (regardless of outcome of the comparison in the latter case).
4323 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4324 uErr |= X86_TRAP_PF_RW;
4325 }
4326#endif
4327
4328 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4329 of the memory operand rather than at the start of it. (Not sure what
4330 happens if it crosses a page boundary.) The current heuristic for
4331 this is to report the #PF for the last byte if the access is more than
4332 64 bytes. This is probably not correct, but we can work that out later;
4333 the main objective now is to get FXSAVE to work like real hardware and
4334 make bs3-cpu-basic2 work. */
4335 if (cbAccess <= 64)
4336 { /* likely */ }
4337 else
4338 GCPtrWhere += cbAccess - 1;
4339
4340 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4341 uErr, GCPtrWhere);
4342}
4343
4344#ifdef IEM_WITH_SETJMP
4345/** \#PF(n) - 0e, longjmp. */
4346DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4347 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4348{
4349 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4350}
4351#endif
4352
4353
4354/** \#MF(0) - 10. */
4355VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4356{
4357 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4358 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4359
4360 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4361 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4362 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4363}
4364
4365
4366/** \#AC(0) - 11. */
4367VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4368{
4369 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4370}
4371
4372#ifdef IEM_WITH_SETJMP
4373/** \#AC(0) - 11, longjmp. */
4374DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4375{
4376 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4377}
4378#endif
4379
4380
4381/** \#XF(0)/\#XM(0) - 19. */
4382VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4383{
4384 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4385}
4386
4387
4388/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4389IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4390{
4391 NOREF(cbInstr);
4392 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4393}
4394
4395
4396/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4397IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4398{
4399 NOREF(cbInstr);
4400 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4401}
4402
4403
4404/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4405IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4406{
4407 NOREF(cbInstr);
4408 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4409}
4410
4411
4412/** @} */
4413
4414/** @name Common opcode decoders.
4415 * @{
4416 */
4417//#include <iprt/mem.h>
4418
4419/**
4420 * Used to add extra details about a stub case.
4421 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4422 */
4423void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4424{
4425#if defined(LOG_ENABLED) && defined(IN_RING3)
4426 PVM pVM = pVCpu->CTX_SUFF(pVM);
4427 char szRegs[4096];
4428 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4429 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4430 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4431 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4432 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4433 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4434 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4435 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4436 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4437 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4438 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4439 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4440 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4441 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4442 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4443 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4444 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4445 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4446 " efer=%016VR{efer}\n"
4447 " pat=%016VR{pat}\n"
4448 " sf_mask=%016VR{sf_mask}\n"
4449 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4450 " lstar=%016VR{lstar}\n"
4451 " star=%016VR{star} cstar=%016VR{cstar}\n"
4452 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4453 );
4454
4455 char szInstr[256];
4456 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4457 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4458 szInstr, sizeof(szInstr), NULL);
4459
4460 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4461#else
4462 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4463#endif
4464}
4465
4466/** @} */
4467
4468
4469
4470/** @name Register Access.
4471 * @{
4472 */
4473
4474/**
4475 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4476 *
4477 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4478 * segment limit.
4479 *
4480 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4481 * @param cbInstr Instruction size.
4482 * @param offNextInstr The offset of the next instruction.
4483 * @param enmEffOpSize Effective operand size.
4484 */
4485VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4486 IEMMODE enmEffOpSize) RT_NOEXCEPT
4487{
4488 switch (enmEffOpSize)
4489 {
4490 case IEMMODE_16BIT:
4491 {
4492 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4493 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4494 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4495 pVCpu->cpum.GstCtx.rip = uNewIp;
4496 else
4497 return iemRaiseGeneralProtectionFault0(pVCpu);
4498 break;
4499 }
4500
4501 case IEMMODE_32BIT:
4502 {
4503 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4504 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4505
4506 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4507 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4508 pVCpu->cpum.GstCtx.rip = uNewEip;
4509 else
4510 return iemRaiseGeneralProtectionFault0(pVCpu);
4511 break;
4512 }
4513
4514 case IEMMODE_64BIT:
4515 {
4516 Assert(IEM_IS_64BIT_CODE(pVCpu));
4517
4518 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4519 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4520 pVCpu->cpum.GstCtx.rip = uNewRip;
4521 else
4522 return iemRaiseGeneralProtectionFault0(pVCpu);
4523 break;
4524 }
4525
4526 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4527 }
4528
4529#ifndef IEM_WITH_CODE_TLB
4530 /* Flush the prefetch buffer. */
4531 pVCpu->iem.s.cbOpcode = cbInstr;
4532#endif
4533
4534 /*
4535 * Clear RF and finish the instruction (maybe raise #DB).
4536 */
4537 return iemRegFinishClearingRF(pVCpu);
4538}
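/* Example of the 16-bit case above: a 2-byte short jump at IP=0xFFFE with offset
   +4 gives 0xFFFE + 2 + 4 = 0x10004, which the uint16_t uNewIp truncates to
   0x0004; the wrapped value is then checked against the CS limit (unless we are
   executing 64-bit code, where the limit check is skipped). */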
4539
4540
4541/**
4542 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4543 *
4544 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4545 * segment limit.
4546 *
4547 * @returns Strict VBox status code.
4548 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4549 * @param cbInstr Instruction size.
4550 * @param offNextInstr The offset of the next instruction.
4551 */
4552VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4553{
4554 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4555
4556 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4557 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4558 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4559 pVCpu->cpum.GstCtx.rip = uNewIp;
4560 else
4561 return iemRaiseGeneralProtectionFault0(pVCpu);
4562
4563#ifndef IEM_WITH_CODE_TLB
4564 /* Flush the prefetch buffer. */
4565 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4566#endif
4567
4568 /*
4569 * Clear RF and finish the instruction (maybe raise #DB).
4570 */
4571 return iemRegFinishClearingRF(pVCpu);
4572}
4573
4574
4575/**
4576 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4577 *
4578 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4579 * segment limit.
4580 *
4581 * @returns Strict VBox status code.
4582 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4583 * @param cbInstr Instruction size.
4584 * @param offNextInstr The offset of the next instruction.
4585 * @param enmEffOpSize Effective operand size.
4586 */
4587VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4588 IEMMODE enmEffOpSize) RT_NOEXCEPT
4589{
4590 if (enmEffOpSize == IEMMODE_32BIT)
4591 {
4592 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4593
4594 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4595 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4596 pVCpu->cpum.GstCtx.rip = uNewEip;
4597 else
4598 return iemRaiseGeneralProtectionFault0(pVCpu);
4599 }
4600 else
4601 {
4602 Assert(enmEffOpSize == IEMMODE_64BIT);
4603
4604 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4605 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4606 pVCpu->cpum.GstCtx.rip = uNewRip;
4607 else
4608 return iemRaiseGeneralProtectionFault0(pVCpu);
4609 }
4610
4611#ifndef IEM_WITH_CODE_TLB
4612 /* Flush the prefetch buffer. */
4613 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4614#endif
4615
4616 /*
4617 * Clear RF and finish the instruction (maybe raise #DB).
4618 */
4619 return iemRegFinishClearingRF(pVCpu);
4620}
4621
4622
4623/**
4624 * Performs a near jump to the specified address.
4625 *
4626 * May raise a \#GP(0) if the new IP is outside the code segment limit.
4627 *
4628 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4629 * @param uNewIp The new IP value.
4630 */
4631VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
4632{
4633 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4634 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
4635 pVCpu->cpum.GstCtx.rip = uNewIp;
4636 else
4637 return iemRaiseGeneralProtectionFault0(pVCpu);
4638 /** @todo Test 16-bit jump in 64-bit mode. */
4639
4640#ifndef IEM_WITH_CODE_TLB
4641 /* Flush the prefetch buffer. */
4642 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4643#endif
4644
4645 /*
4646 * Clear RF and finish the instruction (maybe raise #DB).
4647 */
4648 return iemRegFinishClearingRF(pVCpu);
4649}
4650
4651
4652/**
4653 * Performs a near jump to the specified address.
4654 *
4655 * May raise a \#GP(0) if the new EIP is outside the code segment limit.
4656 *
4657 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4658 * @param uNewEip The new EIP value.
4659 */
4660VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
4661{
4662 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4663 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4664
4665 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4666 pVCpu->cpum.GstCtx.rip = uNewEip;
4667 else
4668 return iemRaiseGeneralProtectionFault0(pVCpu);
4669
4670#ifndef IEM_WITH_CODE_TLB
4671 /* Flush the prefetch buffer. */
4672 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4673#endif
4674
4675 /*
4676 * Clear RF and finish the instruction (maybe raise #DB).
4677 */
4678 return iemRegFinishClearingRF(pVCpu);
4679}
4680
4681
4682/**
4683 * Performs a near jump to the specified address.
4684 *
4685 * May raise a \#GP(0) if the new RIP is non-canonical. There are no code
4686 * segment limit checks in 64-bit mode.
4687 *
4688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4689 * @param uNewRip The new RIP value.
4690 */
4691VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4692{
4693 Assert(IEM_IS_64BIT_CODE(pVCpu));
4694
4695 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4696 pVCpu->cpum.GstCtx.rip = uNewRip;
4697 else
4698 return iemRaiseGeneralProtectionFault0(pVCpu);
4699
4700#ifndef IEM_WITH_CODE_TLB
4701 /* Flush the prefetch buffer. */
4702 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4703#endif
4704
4705 /*
4706 * Clear RF and finish the instruction (maybe raise #DB).
4707 */
4708 return iemRegFinishClearingRF(pVCpu);
4709}
4710
4711/** @} */
4712
4713
4714/** @name FPU access and helpers.
4715 *
4716 * @{
4717 */
4718
4719/**
4720 * Updates the x87.DS and FPUDP registers.
4721 *
4722 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4723 * @param pFpuCtx The FPU context.
4724 * @param iEffSeg The effective segment register.
4725 * @param GCPtrEff The effective address relative to @a iEffSeg.
4726 */
4727DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4728{
4729 RTSEL sel;
4730 switch (iEffSeg)
4731 {
4732 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4733 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4734 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4735 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4736 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4737 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4738 default:
4739 AssertMsgFailed(("%d\n", iEffSeg));
4740 sel = pVCpu->cpum.GstCtx.ds.Sel;
4741 }
4742 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4743 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4744 {
4745 pFpuCtx->DS = 0;
4746 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4747 }
4748 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4749 {
4750 pFpuCtx->DS = sel;
4751 pFpuCtx->FPUDP = GCPtrEff;
4752 }
4753 else
4754 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4755}
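/* Example of the real/V86 encoding above: with DS=0x1234 and an effective address
   of 0x0010, FPUDP is stored as (0x1234 << 4) + 0x0010 = 0x12350 and the DS field
   is left zero, i.e. a linear-style pointer rather than a seg:off pair. */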
4756
4757
4758/**
4759 * Rotates the stack registers in the push direction.
4760 *
4761 * @param pFpuCtx The FPU context.
4762 * @remarks This is a complete waste of time, but fxsave stores the registers in
4763 * stack order.
4764 */
4765DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4766{
4767 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4768 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4769 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4770 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4771 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4772 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4773 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4774 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4775 pFpuCtx->aRegs[0].r80 = r80Tmp;
4776}
4777
4778
4779/**
4780 * Rotates the stack registers in the pop direction.
4781 *
4782 * @param pFpuCtx The FPU context.
4783 * @remarks This is a complete waste of time, but fxsave stores the registers in
4784 * stack order.
4785 */
4786DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4787{
4788 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4789 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4790 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4791 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4792 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4793 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4794 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4795 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4796 pFpuCtx->aRegs[7].r80 = r80Tmp;
4797}
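/* Why the rotation helpers exist: pFpuCtx->aRegs[] mirrors the FXSAVE image,
   which keeps the registers in stack order, so aRegs[0] is always ST(0)
   regardless of the TOP value in FSW. A push therefore both decrements TOP and
   shifts every element one slot up (old ST(0) becomes ST(1) in aRegs[1]), and a
   pop does the inverse. */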
4798
4799
4800/**
4801 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4802 * exception prevents it.
4803 *
4804 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4805 * @param pResult The FPU operation result to push.
4806 * @param pFpuCtx The FPU context.
4807 */
4808static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4809{
4810 /* Update FSW and bail if there are pending exceptions afterwards. */
4811 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4812 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4813 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4814 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4815 {
4816        if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4817 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4818 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4819 pFpuCtx->FSW = fFsw;
4820 return;
4821 }
4822
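    /* A push decrements TOP; (TOP + 7) & 7 is TOP - 1 modulo 8. aRegs[7] is
       written below because it becomes the new ST(0) once the stack is rotated
       at the end of this function. */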
4823 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4824 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4825 {
4826 /* All is fine, push the actual value. */
4827 pFpuCtx->FTW |= RT_BIT(iNewTop);
4828 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4829 }
4830 else if (pFpuCtx->FCW & X86_FCW_IM)
4831 {
4832 /* Masked stack overflow, push QNaN. */
4833 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4834 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4835 }
4836 else
4837 {
4838 /* Raise stack overflow, don't push anything. */
4839 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4840 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4841 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4842 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4843 return;
4844 }
4845
4846 fFsw &= ~X86_FSW_TOP_MASK;
4847 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4848 pFpuCtx->FSW = fFsw;
4849
4850 iemFpuRotateStackPush(pFpuCtx);
4851 RT_NOREF(pVCpu);
4852}
4853
4854
4855/**
4856 * Stores a result in a FPU register and updates the FSW and FTW.
4857 *
4858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4859 * @param pFpuCtx The FPU context.
4860 * @param pResult The result to store.
4861 * @param iStReg Which FPU register to store it in.
4862 */
4863static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4864{
4865 Assert(iStReg < 8);
4866 uint16_t fNewFsw = pFpuCtx->FSW;
4867 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4868 fNewFsw &= ~X86_FSW_C_MASK;
4869 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4870 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4871 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4872 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4873 pFpuCtx->FSW = fNewFsw;
4874 pFpuCtx->FTW |= RT_BIT(iReg);
4875 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4876 RT_NOREF(pVCpu);
4877}
4878
4879
4880/**
4881 * Only updates the FPU status word (FSW) with the result of the current
4882 * instruction.
4883 *
4884 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4885 * @param pFpuCtx The FPU context.
4886 * @param u16FSW The FSW output of the current instruction.
4887 */
4888static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4889{
4890 uint16_t fNewFsw = pFpuCtx->FSW;
4891 fNewFsw &= ~X86_FSW_C_MASK;
4892 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4893 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4894 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4895 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4896 pFpuCtx->FSW = fNewFsw;
4897 RT_NOREF(pVCpu);
4898}
4899
4900
4901/**
4902 * Pops one item off the FPU stack if no pending exception prevents it.
4903 *
4904 * @param pFpuCtx The FPU context.
4905 */
4906static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4907{
4908 /* Check pending exceptions. */
4909 uint16_t uFSW = pFpuCtx->FSW;
4910 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4911 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4912 return;
4913
4914    /* TOP++ - drop the top element (the +9 below is +1 modulo 8 in the TOP field). */
4915 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4916 uFSW &= ~X86_FSW_TOP_MASK;
4917 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4918 pFpuCtx->FSW = uFSW;
4919
4920 /* Mark the previous ST0 as empty. */
4921 iOldTop >>= X86_FSW_TOP_SHIFT;
4922 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4923
4924 /* Rotate the registers. */
4925 iemFpuRotateStackPop(pFpuCtx);
4926}
4927
4928
4929/**
4930 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4931 *
4932 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4933 * @param pResult The FPU operation result to push.
4934 * @param uFpuOpcode The FPU opcode value.
4935 */
4936void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4937{
4938 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4939 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4940 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4941}
4942
4943
4944/**
4945 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4946 * and sets FPUDP and FPUDS.
4947 *
4948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4949 * @param pResult The FPU operation result to push.
4950 * @param iEffSeg The effective segment register.
4951 * @param GCPtrEff The effective address relative to @a iEffSeg.
4952 * @param uFpuOpcode The FPU opcode value.
4953 */
4954void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
4955 uint16_t uFpuOpcode) RT_NOEXCEPT
4956{
4957 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4958 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4959 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4960 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4961}
4962
4963
4964/**
4965 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
4966 * unless a pending exception prevents it.
4967 *
4968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4969 * @param pResult The FPU operation result to store and push.
4970 * @param uFpuOpcode The FPU opcode value.
4971 */
4972void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4973{
4974 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4975 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4976
4977 /* Update FSW and bail if there are pending exceptions afterwards. */
4978 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4979 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4980 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4981 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4982 {
4983 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4984 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
4985 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4986 pFpuCtx->FSW = fFsw;
4987 return;
4988 }
4989
4990 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4991 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4992 {
4993 /* All is fine, push the actual value. */
4994 pFpuCtx->FTW |= RT_BIT(iNewTop);
4995 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
4996 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
4997 }
4998 else if (pFpuCtx->FCW & X86_FCW_IM)
4999 {
5000 /* Masked stack overflow, push QNaN. */
5001 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5002 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5003 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5004 }
5005 else
5006 {
5007 /* Raise stack overflow, don't push anything. */
5008 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5009 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5010 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5011 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5012 return;
5013 }
5014
5015 fFsw &= ~X86_FSW_TOP_MASK;
5016 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5017 pFpuCtx->FSW = fFsw;
5018
5019 iemFpuRotateStackPush(pFpuCtx);
5020}
5021
5022
5023/**
5024 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5025 * FOP.
5026 *
5027 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5028 * @param pResult The result to store.
5029 * @param iStReg Which FPU register to store it in.
5030 * @param uFpuOpcode The FPU opcode value.
5031 */
5032void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5033{
5034 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5035 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5036 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5037}
5038
5039
5040/**
5041 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5042 * FOP, and then pops the stack.
5043 *
5044 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5045 * @param pResult The result to store.
5046 * @param iStReg Which FPU register to store it in.
5047 * @param uFpuOpcode The FPU opcode value.
5048 */
5049void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5050{
5051 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5052 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5053 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5054 iemFpuMaybePopOne(pFpuCtx);
5055}
5056
5057
5058/**
5059 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5060 * FPUDP, and FPUDS.
5061 *
5062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5063 * @param pResult The result to store.
5064 * @param iStReg Which FPU register to store it in.
5065 * @param iEffSeg The effective memory operand selector register.
5066 * @param GCPtrEff The effective memory operand offset.
5067 * @param uFpuOpcode The FPU opcode value.
5068 */
5069void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5070 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5071{
5072 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5073 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5074 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5075 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5076}
5077
5078
5079/**
5080 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5081 * FPUDP, and FPUDS, and then pops the stack.
5082 *
5083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5084 * @param pResult The result to store.
5085 * @param iStReg Which FPU register to store it in.
5086 * @param iEffSeg The effective memory operand selector register.
5087 * @param GCPtrEff The effective memory operand offset.
5088 * @param uFpuOpcode The FPU opcode value.
5089 */
5090void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5091 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5092{
5093 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5094 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5095 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5096 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5097 iemFpuMaybePopOne(pFpuCtx);
5098}
5099
5100
5101/**
5102 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5103 *
5104 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5105 * @param uFpuOpcode The FPU opcode value.
5106 */
5107void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5108{
5109 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5110 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5111}
5112
5113
5114/**
5115 * Updates the FSW, FOP, FPUIP, and FPUCS.
5116 *
5117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5118 * @param u16FSW The FSW from the current instruction.
5119 * @param uFpuOpcode The FPU opcode value.
5120 */
5121void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5122{
5123 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5124 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5125 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5126}
5127
5128
5129/**
5130 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5131 *
5132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5133 * @param u16FSW The FSW from the current instruction.
5134 * @param uFpuOpcode The FPU opcode value.
5135 */
5136void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5137{
5138 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5139 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5140 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5141 iemFpuMaybePopOne(pFpuCtx);
5142}
5143
5144
5145/**
5146 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5147 *
5148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5149 * @param u16FSW The FSW from the current instruction.
5150 * @param iEffSeg The effective memory operand selector register.
5151 * @param GCPtrEff The effective memory operand offset.
5152 * @param uFpuOpcode The FPU opcode value.
5153 */
5154void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5155{
5156 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5157 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5158 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5159 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5160}
5161
5162
5163/**
5164 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5165 *
5166 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5167 * @param u16FSW The FSW from the current instruction.
5168 * @param uFpuOpcode The FPU opcode value.
5169 */
5170void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5171{
5172 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5173 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5174 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5175 iemFpuMaybePopOne(pFpuCtx);
5176 iemFpuMaybePopOne(pFpuCtx);
5177}
5178
5179
5180/**
5181 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5182 *
5183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5184 * @param u16FSW The FSW from the current instruction.
5185 * @param iEffSeg The effective memory operand selector register.
5186 * @param GCPtrEff The effective memory operand offset.
5187 * @param uFpuOpcode The FPU opcode value.
5188 */
5189void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5190 uint16_t uFpuOpcode) RT_NOEXCEPT
5191{
5192 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5193 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5194 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5195 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5196 iemFpuMaybePopOne(pFpuCtx);
5197}
5198
5199
5200/**
5201 * Worker routine for raising an FPU stack underflow exception.
5202 *
5203 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5204 * @param pFpuCtx The FPU context.
5205 * @param iStReg The stack register being accessed.
5206 */
5207static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5208{
5209 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5210 if (pFpuCtx->FCW & X86_FCW_IM)
5211 {
5212 /* Masked underflow. */
5213 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5214 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5215 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5216 if (iStReg != UINT8_MAX)
5217 {
5218 pFpuCtx->FTW |= RT_BIT(iReg);
5219 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5220 }
5221 }
5222 else
5223 {
5224 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5225 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5226 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5227 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5228 }
5229 RT_NOREF(pVCpu);
5230}
5231
5232
5233/**
5234 * Raises a FPU stack underflow exception.
5235 *
5236 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5237 * @param iStReg The destination register that should be loaded
5238 * with QNaN if \#IS is not masked. Specify
5239 * UINT8_MAX if none (like for fcom).
5240 * @param uFpuOpcode The FPU opcode value.
5241 */
5242void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5243{
5244 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5245 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5246 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5247}
5248
5249
5250void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5251{
5252 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5253 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5254 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5255 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5256}
5257
5258
5259void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5260{
5261 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5262 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5263 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5264 iemFpuMaybePopOne(pFpuCtx);
5265}
5266
5267
5268void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5269 uint16_t uFpuOpcode) RT_NOEXCEPT
5270{
5271 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5272 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5273 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5274 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5275 iemFpuMaybePopOne(pFpuCtx);
5276}
5277
5278
5279void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5280{
5281 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5282 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5283 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5284 iemFpuMaybePopOne(pFpuCtx);
5285 iemFpuMaybePopOne(pFpuCtx);
5286}
5287
5288
5289void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5290{
5291 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5292 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5293
5294 if (pFpuCtx->FCW & X86_FCW_IM)
5295 {
5296        /* Masked underflow - Push QNaN. */
5297 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5298 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5299 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5300 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5301 pFpuCtx->FTW |= RT_BIT(iNewTop);
5302 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5303 iemFpuRotateStackPush(pFpuCtx);
5304 }
5305 else
5306 {
5307 /* Exception pending - don't change TOP or the register stack. */
5308 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5309 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5310 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5311 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5312 }
5313}
5314
5315
5316void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5317{
5318 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5319 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5320
5321 if (pFpuCtx->FCW & X86_FCW_IM)
5322 {
5323        /* Masked underflow - Push QNaN. */
5324 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5325 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5326 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5327 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5328 pFpuCtx->FTW |= RT_BIT(iNewTop);
5329 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5330 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5331 iemFpuRotateStackPush(pFpuCtx);
5332 }
5333 else
5334 {
5335 /* Exception pending - don't change TOP or the register stack. */
5336 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5337 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5338 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5339 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5340 }
5341}
5342
5343
5344/**
5345 * Worker routine for raising an FPU stack overflow exception on a push.
5346 *
5347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5348 * @param pFpuCtx The FPU context.
5349 */
5350static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5351{
5352 if (pFpuCtx->FCW & X86_FCW_IM)
5353 {
5354 /* Masked overflow. */
5355 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5356 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5357 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5358 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5359 pFpuCtx->FTW |= RT_BIT(iNewTop);
5360 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5361 iemFpuRotateStackPush(pFpuCtx);
5362 }
5363 else
5364 {
5365 /* Exception pending - don't change TOP or the register stack. */
5366 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5367 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5368 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5369 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5370 }
5371 RT_NOREF(pVCpu);
5372}
5373
5374
5375/**
5376 * Raises a FPU stack overflow exception on a push.
5377 *
5378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5379 * @param uFpuOpcode The FPU opcode value.
5380 */
5381void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5382{
5383 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5384 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5385 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5386}
5387
5388
5389/**
5390 * Raises a FPU stack overflow exception on a push with a memory operand.
5391 *
5392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5393 * @param iEffSeg The effective memory operand selector register.
5394 * @param GCPtrEff The effective memory operand offset.
5395 * @param uFpuOpcode The FPU opcode value.
5396 */
5397void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5398{
5399 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5400 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5401 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5402 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5403}
5404
5405/** @} */
5406
5407
5408/** @name SSE+AVX SIMD access and helpers.
5409 *
5410 * @{
5411 */
5412/**
5413 * Stores a result in a SIMD XMM register, updates the MXCSR.
5414 *
5415 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5416 * @param pResult The result to store.
5417 * @param iXmmReg Which SIMD XMM register to store the result in.
5418 */
5419void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5420{
5421 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5422 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5423
5424 /* The result is only updated if there is no unmasked exception pending. */
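    /* The exception mask bits (IM..PM) sit X86_MXCSR_XCPT_MASK_SHIFT bits above
       the corresponding flag bits (IE..PE) in MXCSR, so shifting the masks down
       lines them up and ~masks & flags yields the pending unmasked exceptions. */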
5425 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5426 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5427 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5428}
5429
5430
5431/**
5432 * Updates the MXCSR.
5433 *
5434 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5435 * @param fMxcsr The new MXCSR value.
5436 */
5437void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5438{
5439 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5440 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5441}
5442/** @} */
5443
5444
5445/** @name Memory access.
5446 *
5447 * @{
5448 */
5449
5450
5451/**
5452 * Updates the IEMCPU::cbWritten counter if applicable.
5453 *
5454 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5455 * @param fAccess The access being accounted for.
5456 * @param cbMem The access size.
5457 */
5458DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5459{
5460 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5461 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5462 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5463}
5464
5465
5466/**
5467 * Applies the segment limit, base and attributes.
5468 *
5469 * This may raise a \#GP or \#SS.
5470 *
5471 * @returns VBox strict status code.
5472 *
5473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5474 * @param fAccess The kind of access which is being performed.
5475 * @param iSegReg The index of the segment register to apply.
5476 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5477 * TSS, ++).
5478 * @param cbMem The access size.
5479 * @param pGCPtrMem Pointer to the guest memory address to apply
5480 * segmentation to. Input and output parameter.
5481 */
5482VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5483{
5484 if (iSegReg == UINT8_MAX)
5485 return VINF_SUCCESS;
5486
5487 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5488 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5489 switch (IEM_GET_CPU_MODE(pVCpu))
5490 {
5491 case IEMMODE_16BIT:
5492 case IEMMODE_32BIT:
5493 {
5494 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5495 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5496
5497 if ( pSel->Attr.n.u1Present
5498 && !pSel->Attr.n.u1Unusable)
5499 {
5500 Assert(pSel->Attr.n.u1DescType);
5501 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5502 {
5503 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5504 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5505 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5506
5507 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5508 {
5509 /** @todo CPL check. */
5510 }
5511
5512 /*
5513 * There are two kinds of data selectors, normal and expand down.
5514 */
5515 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5516 {
5517 if ( GCPtrFirst32 > pSel->u32Limit
5518 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5519 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5520 }
5521 else
5522 {
5523 /*
5524 * The upper boundary is defined by the B bit, not the G bit!
5525 */
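                        /* Example: an expand-down data segment with limit 0x0FFF
                           and B=0 allows offsets 0x1000 through 0xFFFF only; an
                           access starting below 0x1000 or ending above 0xFFFF is
                           rejected here. */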
5526 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5527 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5528 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5529 }
5530 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5531 }
5532 else
5533 {
5534 /*
5535                 * A code selector can usually be used to read through it; writing
5536                 * is only permitted in real and V8086 mode.
5537 */
5538 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5539 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5540 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5541 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5542 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5543
5544 if ( GCPtrFirst32 > pSel->u32Limit
5545 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5546 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5547
5548 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5549 {
5550 /** @todo CPL check. */
5551 }
5552
5553 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5554 }
5555 }
5556 else
5557 return iemRaiseGeneralProtectionFault0(pVCpu);
5558 return VINF_SUCCESS;
5559 }
5560
5561 case IEMMODE_64BIT:
5562 {
5563 RTGCPTR GCPtrMem = *pGCPtrMem;
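            /* In 64-bit mode only FS and GS have a non-zero segment base; the
               bases of CS, DS, ES and SS are treated as zero by the CPU. */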
5564 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5565 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5566
5567 Assert(cbMem >= 1);
5568 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5569 return VINF_SUCCESS;
5570 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5571 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5572 return iemRaiseGeneralProtectionFault0(pVCpu);
5573 }
5574
5575 default:
5576 AssertFailedReturn(VERR_IEM_IPE_7);
5577 }
5578}
5579
5580
5581/**
5582 * Translates a virtual address to a physical address and checks if we
5583 * can access the page as specified.
5584 *
5585 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5586 * @param GCPtrMem The virtual address.
5587 * @param cbAccess The access size, for raising \#PF correctly for
5588 * FXSAVE and such.
5589 * @param fAccess The intended access.
5590 * @param pGCPhysMem Where to return the physical address.
5591 */
5592VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5593 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5594{
5595 /** @todo Need a different PGM interface here. We're currently using
5596 * generic / REM interfaces. This won't cut it for R0. */
5597 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5598 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5599 * here. */
5600 PGMPTWALK Walk;
5601 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5602 if (RT_FAILURE(rc))
5603 {
5604 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5605 /** @todo Check unassigned memory in unpaged mode. */
5606 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5607#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5608 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5609 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5610#endif
5611 *pGCPhysMem = NIL_RTGCPHYS;
5612 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5613 }
5614
5615 /* If the page is writable and does not have the no-exec bit set, all
5616 access is allowed. Otherwise we'll have to check more carefully... */
5617 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5618 {
5619 /* Write to read only memory? */
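        /* Non-system accesses at CPL 3 to a read-only page always fault;
           supervisor/system writes fault only when CR0.WP is set, which is what
           the condition below expresses. */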
5620 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5621 && !(Walk.fEffective & X86_PTE_RW)
5622 && ( ( IEM_GET_CPL(pVCpu) == 3
5623 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5624 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5625 {
5626 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5627 *pGCPhysMem = NIL_RTGCPHYS;
5628#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5629 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5630 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5631#endif
5632 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5633 }
5634
5635 /* Kernel memory accessed by userland? */
5636 if ( !(Walk.fEffective & X86_PTE_US)
5637 && IEM_GET_CPL(pVCpu) == 3
5638 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5639 {
5640 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5641 *pGCPhysMem = NIL_RTGCPHYS;
5642#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5643 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5644 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5645#endif
5646 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5647 }
5648
5649 /* Executing non-executable memory? */
5650 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5651 && (Walk.fEffective & X86_PTE_PAE_NX)
5652 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5653 {
5654 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5655 *pGCPhysMem = NIL_RTGCPHYS;
5656#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5657 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5658 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5659#endif
5660 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5661 VERR_ACCESS_DENIED);
5662 }
5663 }
5664
5665 /*
5666 * Set the dirty / access flags.
5667 * ASSUMES this is set when the address is translated rather than on commit...
5668 */
5669 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5670 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5671 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5672 {
5673 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5674 AssertRC(rc2);
5675 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5676 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5677 }
5678
5679 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5680 *pGCPhysMem = GCPhys;
5681 return VINF_SUCCESS;
5682}
5683
5684
5685/**
5686 * Looks up a memory mapping entry.
5687 *
5688 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5689 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5690 * @param pvMem The memory address.
5691 * @param fAccess The access type of the mapping to look up.
5692 */
5693DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5694{
5695 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5696 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5697 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5698 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5699 return 0;
5700 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5701 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5702 return 1;
5703 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5704 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5705 return 2;
5706 return VERR_NOT_FOUND;
5707}
5708
5709
5710/**
5711 * Finds a free memmap entry when using iNextMapping doesn't work.
5712 *
5713 * @returns Memory mapping index, 1024 on failure.
5714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5715 */
5716static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5717{
5718 /*
5719 * The easy case.
5720 */
5721 if (pVCpu->iem.s.cActiveMappings == 0)
5722 {
5723 pVCpu->iem.s.iNextMapping = 1;
5724 return 0;
5725 }
5726
5727 /* There should be enough mappings for all instructions. */
5728 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5729
5730 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5731 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5732 return i;
5733
5734 AssertFailedReturn(1024);
5735}
5736
5737
5738/**
5739 * Commits a bounce buffer that needs writing back and unmaps it.
5740 *
5741 * @returns Strict VBox status code.
5742 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5743 * @param iMemMap The index of the buffer to commit.
5744 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5745 * Always false in ring-3, obviously.
5746 */
5747static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5748{
5749 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5750 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5751#ifdef IN_RING3
5752 Assert(!fPostponeFail);
5753 RT_NOREF_PV(fPostponeFail);
5754#endif
5755
5756 /*
5757 * Do the writing.
5758 */
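    /* Note: a bounce buffer maps at most two physical ranges - cbFirst bytes at
       GCPhysFirst and, when the original linear access crossed a page boundary,
       cbSecond bytes at GCPhysSecond. Both are written back below. */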
5759 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5760 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5761 {
5762 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5763 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5764 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5765 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5766 {
5767 /*
5768 * Carefully and efficiently dealing with access handler return
5769 * codes make this a little bloated.
5770 */
5771 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5772 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5773 pbBuf,
5774 cbFirst,
5775 PGMACCESSORIGIN_IEM);
5776 if (rcStrict == VINF_SUCCESS)
5777 {
5778 if (cbSecond)
5779 {
5780 rcStrict = PGMPhysWrite(pVM,
5781 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5782 pbBuf + cbFirst,
5783 cbSecond,
5784 PGMACCESSORIGIN_IEM);
5785 if (rcStrict == VINF_SUCCESS)
5786 { /* nothing */ }
5787 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5788 {
5789 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5790 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5791 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5792 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5793 }
5794#ifndef IN_RING3
5795 else if (fPostponeFail)
5796 {
5797 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5798 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5799 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5800 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5801 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5802 return iemSetPassUpStatus(pVCpu, rcStrict);
5803 }
5804#endif
5805 else
5806 {
5807 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5808 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5809 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5810 return rcStrict;
5811 }
5812 }
5813 }
5814 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5815 {
5816 if (!cbSecond)
5817 {
5818 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5819 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5820 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5821 }
5822 else
5823 {
5824 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5825 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5826 pbBuf + cbFirst,
5827 cbSecond,
5828 PGMACCESSORIGIN_IEM);
5829 if (rcStrict2 == VINF_SUCCESS)
5830 {
5831 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5832 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5833 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5834 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5835 }
5836 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5837 {
5838 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5839 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5840 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5841 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5842 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5843 }
5844#ifndef IN_RING3
5845 else if (fPostponeFail)
5846 {
5847 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5848 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5849 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5850 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5851 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5852 return iemSetPassUpStatus(pVCpu, rcStrict);
5853 }
5854#endif
5855 else
5856 {
5857 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5858 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5859 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5860 return rcStrict2;
5861 }
5862 }
5863 }
5864#ifndef IN_RING3
5865 else if (fPostponeFail)
5866 {
5867 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5868 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5869 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5870 if (!cbSecond)
5871 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5872 else
5873 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5874 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5875 return iemSetPassUpStatus(pVCpu, rcStrict);
5876 }
5877#endif
5878 else
5879 {
5880 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5881 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5882 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5883 return rcStrict;
5884 }
5885 }
5886 else
5887 {
5888 /*
5889 * No access handlers, much simpler.
5890 */
5891 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5892 if (RT_SUCCESS(rc))
5893 {
5894 if (cbSecond)
5895 {
5896 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5897 if (RT_SUCCESS(rc))
5898 { /* likely */ }
5899 else
5900 {
5901 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5902 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5903 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5904 return rc;
5905 }
5906 }
5907 }
5908 else
5909 {
5910 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5911 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5912 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5913 return rc;
5914 }
5915 }
5916 }
5917
5918#if defined(IEM_LOG_MEMORY_WRITES)
5919 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5920 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5921 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5922 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5923 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5924 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5925
5926 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5927 g_cbIemWrote = cbWrote;
5928 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5929#endif
5930
5931 /*
5932 * Free the mapping entry.
5933 */
5934 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5935 Assert(pVCpu->iem.s.cActiveMappings != 0);
5936 pVCpu->iem.s.cActiveMappings--;
5937 return VINF_SUCCESS;
5938}
5939
5940
5941/**
5942 * iemMemMap worker that deals with a request crossing pages.
5943 */
5944static VBOXSTRICTRC
5945iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5946{
5947 Assert(cbMem <= GUEST_PAGE_SIZE);
5948
5949 /*
5950 * Do the address translations.
5951 */
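    /* Example of the split: a 4-byte access at a linear address ending in 0xffe
       on a 4KiB guest page gives cbFirstPage = 2 and cbSecondPage = 2, with the
       second translation done for the start of the following page. */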
5952 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
5953 RTGCPHYS GCPhysFirst;
5954 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
5955 if (rcStrict != VINF_SUCCESS)
5956 return rcStrict;
5957 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
5958
5959 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
5960 RTGCPHYS GCPhysSecond;
5961 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5962 cbSecondPage, fAccess, &GCPhysSecond);
5963 if (rcStrict != VINF_SUCCESS)
5964 return rcStrict;
5965 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
5966 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
5967
5968 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5969
5970 /*
5971 * Read in the current memory content if it's a read, execute or partial
5972 * write access.
5973 */
5974 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5975
5976 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5977 {
5978 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5979 {
5980 /*
5981 * Must carefully deal with access handler status codes here,
5982 * makes the code a bit bloated.
5983 */
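            /* Note: informational statuses (the PGM_PHYS_RW_IS_SUCCESS cases) returned by
               access handlers are merged and forwarded via iemSetPassUpStatus so the caller
               eventually sees them, while real failures abort the mapping right away. */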
5984 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5985 if (rcStrict == VINF_SUCCESS)
5986 {
5987 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5988 if (rcStrict == VINF_SUCCESS)
5989 { /*likely */ }
5990 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5991 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5992 else
5993 {
5994 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
5995 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5996 return rcStrict;
5997 }
5998 }
5999 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6000 {
6001 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6002 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6003 {
6004 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6005 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6006 }
6007 else
6008 {
6009 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6010 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6011 return rcStrict2;
6012 }
6013 }
6014 else
6015 {
6016 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6017 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6018 return rcStrict;
6019 }
6020 }
6021 else
6022 {
6023 /*
6024 * No informational status codes here, much more straightforward.
6025 */
6026 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6027 if (RT_SUCCESS(rc))
6028 {
6029 Assert(rc == VINF_SUCCESS);
6030 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6031 if (RT_SUCCESS(rc))
6032 Assert(rc == VINF_SUCCESS);
6033 else
6034 {
6035 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6036 return rc;
6037 }
6038 }
6039 else
6040 {
6041 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6042 return rc;
6043 }
6044 }
6045 }
6046#ifdef VBOX_STRICT
6047 else
6048 memset(pbBuf, 0xcc, cbMem);
6049 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6050 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6051#endif
6052 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6053
6054 /*
6055 * Commit the bounce buffer entry.
6056 */
6057 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6058 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6059 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6060 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6061 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6062 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6063 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6064 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6065 pVCpu->iem.s.cActiveMappings++;
6066
6067 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6068 *ppvMem = pbBuf;
6069 return VINF_SUCCESS;
6070}
6071
6072
6073/**
6074 * iemMemMap worker that deals with iemMemPageMap failures.
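 *
 * Only VERR_PGM_PHYS_TLB_CATCH_WRITE, VERR_PGM_PHYS_TLB_CATCH_ALL and
 * VERR_PGM_PHYS_TLB_UNASSIGNED are handled here: the current page content is
 * read into the bounce buffer (all 0xff for unassigned pages) and the mapping
 * entry is recorded so the commit path can replay any write through PGM.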
6075 */
6076static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6077 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6078{
6079 /*
6080 * Filter out conditions we can handle and the ones which shouldn't happen.
6081 */
6082 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6083 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6084 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6085 {
6086 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6087 return rcMap;
6088 }
6089 pVCpu->iem.s.cPotentialExits++;
6090
6091 /*
6092 * Read in the current memory content if it's a read, execute or partial
6093 * write access.
6094 */
6095 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6096 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6097 {
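        /* Nothing backs this physical range, so the guest reads all-ones; just fill
           the bounce buffer with that pattern. */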
6098 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6099 memset(pbBuf, 0xff, cbMem);
6100 else
6101 {
6102 int rc;
6103 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6104 {
6105 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6106 if (rcStrict == VINF_SUCCESS)
6107 { /* nothing */ }
6108 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6109 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6110 else
6111 {
6112 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6113 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6114 return rcStrict;
6115 }
6116 }
6117 else
6118 {
6119 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6120 if (RT_SUCCESS(rc))
6121 { /* likely */ }
6122 else
6123 {
6124 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6125 GCPhysFirst, rc));
6126 return rc;
6127 }
6128 }
6129 }
6130 }
6131#ifdef VBOX_STRICT
6132 else
6133 memset(pbBuf, 0xcc, cbMem);
6136 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6137 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6138#endif
6139
6140 /*
6141 * Commit the bounce buffer entry.
6142 */
6143 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6144 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6145 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6146 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6147 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6148 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6149 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6150 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6151 pVCpu->iem.s.cActiveMappings++;
6152
6153 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6154 *ppvMem = pbBuf;
6155 return VINF_SUCCESS;
6156}
6157
6158
6159
6160/**
6161 * Maps the specified guest memory for the given kind of access.
6162 *
6163 * This may use bounce buffering of the memory if it crosses a page
6164 * boundary or if there is an access handler installed for any of it. Because
6165 * of lock prefix guarantees, we're in for some extra clutter when this
6166 * happens.
6167 *
6168 * This may raise a \#GP, \#SS, \#PF or \#AC.
6169 *
6170 * @returns VBox strict status code.
6171 *
6172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6173 * @param ppvMem Where to return the pointer to the mapped memory.
6174 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6175 * 8, 12, 16, 32 or 512. When used by string operations
6176 * it can be up to a page.
6177 * @param iSegReg The index of the segment register to use for this
6178 * access. The base and limits are checked. Use UINT8_MAX
6179 * to indicate that no segmentation is required (for IDT,
6180 * GDT and LDT accesses).
6181 * @param GCPtrMem The address of the guest memory.
6182 * @param fAccess How the memory is being accessed. The
6183 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
6184 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
6185 * when raising exceptions.
6186 * @param uAlignCtl Alignment control:
6187 * - Bits 15:0 is the alignment mask.
6188 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6189 * IEM_MEMMAP_F_ALIGN_SSE, and
6190 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6191 * Pass zero to skip alignment.
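 *
 * A simplified, illustrative caller sketch (it mirrors what the data fetch
 * helpers further down in this file do; the segment register and alignment
 * mask are merely example values):
 * @code
 *      uint32_t const *pu32Src;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_DS, GCPtrMem,
 *                                        IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint32_t const uValue = *pu32Src;
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
 *      }
 * @endcode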
6192 */
6193VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6194 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6195{
6196 /*
6197 * Check the input and figure out which mapping entry to use.
6198 */
6199 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6200 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6201 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6202 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6203 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6204
6205 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6206 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6207 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6208 {
6209 iMemMap = iemMemMapFindFree(pVCpu);
6210 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6211 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6212 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6213 pVCpu->iem.s.aMemMappings[2].fAccess),
6214 VERR_IEM_IPE_9);
6215 }
6216
6217 /*
6218 * Map the memory, checking that we can actually access it. If something
6219 * slightly complicated happens, fall back on bounce buffering.
6220 */
6221 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6222 if (rcStrict == VINF_SUCCESS)
6223 { /* likely */ }
6224 else
6225 return rcStrict;
6226
6227 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6228 { /* likely */ }
6229 else
6230 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6231
6232 /*
6233 * Alignment check.
6234 */
6235 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6236 { /* likelyish */ }
6237 else
6238 {
6239 /* Misaligned access. */
6240 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6241 {
6242 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6243 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6244 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6245 {
6246 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6247
6248 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6249 return iemRaiseAlignmentCheckException(pVCpu);
6250 }
6251 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6252 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6253 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6254 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6255 * that's what FXSAVE does on a 10980xe. */
6256 && iemMemAreAlignmentChecksEnabled(pVCpu))
6257 return iemRaiseAlignmentCheckException(pVCpu);
6258 else
6259 return iemRaiseGeneralProtectionFault0(pVCpu);
6260 }
6261 }
6262
6263#ifdef IEM_WITH_DATA_TLB
6264 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6265
6266 /*
6267 * Get the TLB entry for this page.
6268 */
6269 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6270 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6271 if (pTlbe->uTag == uTag)
6272 {
6273# ifdef VBOX_WITH_STATISTICS
6274 pVCpu->iem.s.DataTlb.cTlbHits++;
6275# endif
6276 }
6277 else
6278 {
6279 pVCpu->iem.s.DataTlb.cTlbMisses++;
6280 PGMPTWALK Walk;
6281 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6282 if (RT_FAILURE(rc))
6283 {
6284 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6285# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6286 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6287 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6288# endif
6289 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6290 }
6291
6292 Assert(Walk.fSucceeded);
6293 pTlbe->uTag = uTag;
6294 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6295 pTlbe->GCPhys = Walk.GCPhys;
6296 pTlbe->pbMappingR3 = NULL;
6297 }
6298
6299 /*
6300 * Check TLB page table level access flags.
6301 */
6302 /* If the page is either supervisor only or non-writable, we need to do
6303 more careful access checks. */
6304 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6305 {
6306 /* Write to read only memory? */
6307 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6308 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6309 && ( ( IEM_GET_CPL(pVCpu) == 3
6310 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6311 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6312 {
6313 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6314# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6315 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6316 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6317# endif
6318 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6319 }
6320
6321 /* Kernel memory accessed by userland? */
6322 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6323 && IEM_GET_CPL(pVCpu) == 3
6324 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6325 {
6326 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6327# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6328 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6329 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6330# endif
6331 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6332 }
6333 }
6334
6335 /*
6336 * Set the dirty / access flags.
6337 * ASSUMES this is set when the address is translated rather than on commit...
6338 */
6339 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6340 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6341 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6342 {
6343 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6344 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6345 AssertRC(rc2);
6346 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6347 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6348 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6349 }
6350
6351 /*
6352 * Look up the physical page info if necessary.
6353 */
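    /* Note: the IEMTLBE_F_PHYS_REV bits are compared against DataTlb.uTlbPhysRev; once they
       no longer match, the cached physical info (pbMappingR3 and the IEMTLBE_F_PG_XXX flags)
       is considered stale and is re-queried from PGM below.  Bumping uTlbPhysRev is thus how
       the physical side of every TLB entry gets invalidated in one go. */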
6354 uint8_t *pbMem = NULL;
6355 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6356# ifdef IN_RING3
6357 pbMem = pTlbe->pbMappingR3;
6358# else
6359 pbMem = NULL;
6360# endif
6361 else
6362 {
6363 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6364 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6365 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6366 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6367 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6368 { /* likely */ }
6369 else
6370 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6371 pTlbe->pbMappingR3 = NULL;
6372 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6373 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6374 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6375 &pbMem, &pTlbe->fFlagsAndPhysRev);
6376 AssertRCReturn(rc, rc);
6377# ifdef IN_RING3
6378 pTlbe->pbMappingR3 = pbMem;
6379# endif
6380 }
6381
6382 /*
6383 * Check the physical page level access and mapping.
6384 */
6385 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6386 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6387 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6388 { /* probably likely */ }
6389 else
6390 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6391 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6392 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6393 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6394 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6395 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6396
6397 if (pbMem)
6398 {
6399 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6400 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6401 fAccess |= IEM_ACCESS_NOT_LOCKED;
6402 }
6403 else
6404 {
6405 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6406 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6407 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6408 if (rcStrict != VINF_SUCCESS)
6409 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6410 }
6411
6412 void * const pvMem = pbMem;
6413
6414 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6415 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6416 if (fAccess & IEM_ACCESS_TYPE_READ)
6417 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6418
6419#else /* !IEM_WITH_DATA_TLB */
6420
6421 RTGCPHYS GCPhysFirst;
6422 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6423 if (rcStrict != VINF_SUCCESS)
6424 return rcStrict;
6425
6426 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6427 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6428 if (fAccess & IEM_ACCESS_TYPE_READ)
6429 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6430
6431 void *pvMem;
6432 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6433 if (rcStrict != VINF_SUCCESS)
6434 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6435
6436#endif /* !IEM_WITH_DATA_TLB */
6437
6438 /*
6439 * Fill in the mapping table entry.
6440 */
6441 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6442 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6443 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6444 pVCpu->iem.s.cActiveMappings += 1;
6445
6446 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6447 *ppvMem = pvMem;
6448
6449 return VINF_SUCCESS;
6450}
6451
6452
6453/**
6454 * Commits the guest memory if bounce buffered and unmaps it.
6455 *
6456 * @returns Strict VBox status code.
6457 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6458 * @param pvMem The mapping.
6459 * @param fAccess The kind of access.
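 *
 * @note Only bounce buffered write mappings need the commit step; other entries
 *       either just release their PGM page mapping lock (when
 *       IEM_ACCESS_NOT_LOCKED is clear) or are simply freed.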
6460 */
6461VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6462{
6463 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6464 AssertReturn(iMemMap >= 0, iMemMap);
6465
6466 /* If it's bounce buffered, we may need to write back the buffer. */
6467 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6468 {
6469 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6470 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6471 }
6472 /* Otherwise unlock it. */
6473 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6474 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6475
6476 /* Free the entry. */
6477 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6478 Assert(pVCpu->iem.s.cActiveMappings != 0);
6479 pVCpu->iem.s.cActiveMappings--;
6480 return VINF_SUCCESS;
6481}
6482
6483#ifdef IEM_WITH_SETJMP
6484
6485/**
6486 * Maps the specified guest memory for the given kind of access, longjmp on
6487 * error.
6488 *
6489 * This may use bounce buffering of the memory if it crosses a page
6490 * boundary or if there is an access handler installed for any of it. Because
6491 * of lock prefix guarantees, we're in for some extra clutter when this
6492 * happens.
6493 *
6494 * This may raise a \#GP, \#SS, \#PF or \#AC.
6495 *
6496 * @returns Pointer to the mapped memory.
6497 *
6498 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6499 * @param cbMem The number of bytes to map. This is usually 1,
6500 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6501 * string operations it can be up to a page.
6502 * @param iSegReg The index of the segment register to use for
6503 * this access. The base and limits are checked.
6504 * Use UINT8_MAX to indicate that no segmentation
6505 * is required (for IDT, GDT and LDT accesses).
6506 * @param GCPtrMem The address of the guest memory.
6507 * @param fAccess How the memory is being accessed. The
6508 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6509 * how to map the memory, while the
6510 * IEM_ACCESS_WHAT_XXX bit is used when raising
6511 * exceptions.
6512 * @param uAlignCtl Alignment control:
6513 * - Bits 15:0 is the alignment mask.
6514 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6515 * IEM_MEMMAP_F_ALIGN_SSE, and
6516 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6517 * Pass zero to skip alignment.
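 *
 * Illustrative caller sketch (modelled on the Jmp data fetch helpers further
 * down in this file; iSegReg and GCPtrMem are whatever the caller decoded):
 * @code
 *      uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem,
 *                                                               IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
 *      uint16_t const uValue = *pu16Src;
 *      iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
 * @endcode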
6518 */
6519void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6520 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6521{
6522 /*
6523 * Check the input, check segment access and adjust address
6524 * with segment base.
6525 */
6526 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6527 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6528 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6529
6530 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6531 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6532 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6533
6534 /*
6535 * Alignment check.
6536 */
6537 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6538 { /* likelyish */ }
6539 else
6540 {
6541 /* Misaligned access. */
6542 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6543 {
6544 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6545 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6546 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6547 {
6548 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6549
6550 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6551 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6552 }
6553 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6554 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6555 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6556 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6557 * that's what FXSAVE does on a 10980xe. */
6558 && iemMemAreAlignmentChecksEnabled(pVCpu))
6559 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6560 else
6561 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6562 }
6563 }
6564
6565 /*
6566 * Figure out which mapping entry to use.
6567 */
6568 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6569 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6570 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6571 {
6572 iMemMap = iemMemMapFindFree(pVCpu);
6573 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6574 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6575 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6576 pVCpu->iem.s.aMemMappings[2].fAccess),
6577 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6578 }
6579
6580 /*
6581 * Crossing a page boundary?
6582 */
6583 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6584 { /* No (likely). */ }
6585 else
6586 {
6587 void *pvMem;
6588 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6589 if (rcStrict == VINF_SUCCESS)
6590 return pvMem;
6591 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6592 }
6593
6594#ifdef IEM_WITH_DATA_TLB
6595 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6596
6597 /*
6598 * Get the TLB entry for this page.
6599 */
6600 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6601 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6602 if (pTlbe->uTag == uTag)
6603 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6604 else
6605 {
6606 pVCpu->iem.s.DataTlb.cTlbMisses++;
6607 PGMPTWALK Walk;
6608 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6609 if (RT_FAILURE(rc))
6610 {
6611 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6612# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6613 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6614 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6615# endif
6616 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6617 }
6618
6619 Assert(Walk.fSucceeded);
6620 pTlbe->uTag = uTag;
6621 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6622 pTlbe->GCPhys = Walk.GCPhys;
6623 pTlbe->pbMappingR3 = NULL;
6624 }
6625
6626 /*
6627 * Check the flags and physical revision.
6628 */
6629 /** @todo make the caller pass these in with fAccess. */
6630 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6631 ? IEMTLBE_F_PT_NO_USER : 0;
6632 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6633 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6634 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6635 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6636 ? IEMTLBE_F_PT_NO_WRITE : 0)
6637 : 0;
6638 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
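    /* Note: uTlbPhysRev is advanced in IEMTLB_PHYS_REV_INCR sized steps, which should keep the
       low IEMTLBE_F_XXX flag bits clear.  The single compare below therefore only takes the fast
       path when the physical revision is current and none of the masked no-access / accessed /
       dirty flags are set; everything else drops into the slow path. */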
6639 uint8_t *pbMem = NULL;
6640 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6641 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6642# ifdef IN_RING3
6643 pbMem = pTlbe->pbMappingR3;
6644# else
6645 pbMem = NULL;
6646# endif
6647 else
6648 {
6649 /*
6650 * Okay, something isn't quite right or needs refreshing.
6651 */
6652 /* Write to read only memory? */
6653 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6654 {
6655 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6656# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6657 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6658 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6659# endif
6660 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6661 }
6662
6663 /* Kernel memory accessed by userland? */
6664 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6665 {
6666 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6667# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6668 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6669 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6670# endif
6671 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6672 }
6673
6674 /* Set the dirty / access flags.
6675 ASSUMES this is set when the address is translated rather than on commit... */
6676 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6677 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6678 {
6679 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6680 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6681 AssertRC(rc2);
6682 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6683 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6684 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6685 }
6686
6687 /*
6688 * Check if the physical page info needs updating.
6689 */
6690 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6691# ifdef IN_RING3
6692 pbMem = pTlbe->pbMappingR3;
6693# else
6694 pbMem = NULL;
6695# endif
6696 else
6697 {
6698 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6699 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6700 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6701 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6702 pTlbe->pbMappingR3 = NULL;
6703 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6704 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6705 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6706 &pbMem, &pTlbe->fFlagsAndPhysRev);
6707 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6708# ifdef IN_RING3
6709 pTlbe->pbMappingR3 = pbMem;
6710# endif
6711 }
6712
6713 /*
6714 * Check the physical page level access and mapping.
6715 */
6716 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6717 { /* probably likely */ }
6718 else
6719 {
6720 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6721 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6722 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6723 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6724 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6725 if (rcStrict == VINF_SUCCESS)
6726 return pbMem;
6727 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6728 }
6729 }
6730 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6731
6732 if (pbMem)
6733 {
6734 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6735 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6736 fAccess |= IEM_ACCESS_NOT_LOCKED;
6737 }
6738 else
6739 {
6740 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6741 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6742 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6743 if (rcStrict == VINF_SUCCESS)
6744 return pbMem;
6745 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6746 }
6747
6748 void * const pvMem = pbMem;
6749
6750 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6751 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6752 if (fAccess & IEM_ACCESS_TYPE_READ)
6753 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6754
6755#else /* !IEM_WITH_DATA_TLB */
6756
6757
6758 RTGCPHYS GCPhysFirst;
6759 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6760 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6761 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6762
6763 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6764 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6765 if (fAccess & IEM_ACCESS_TYPE_READ)
6766 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6767
6768 void *pvMem;
6769 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6770 if (rcStrict == VINF_SUCCESS)
6771 { /* likely */ }
6772 else
6773 {
6774 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6775 if (rcStrict == VINF_SUCCESS)
6776 return pvMem;
6777 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6778 }
6779
6780#endif /* !IEM_WITH_DATA_TLB */
6781
6782 /*
6783 * Fill in the mapping table entry.
6784 */
6785 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6786 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6787 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6788 pVCpu->iem.s.cActiveMappings++;
6789
6790 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6791 return pvMem;
6792}
6793
6794
6795/**
6796 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6797 *
6798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6799 * @param pvMem The mapping.
6800 * @param fAccess The kind of access.
6801 */
6802void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
6803{
6804 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6805 AssertStmt(iMemMap >= 0, IEM_DO_LONGJMP(pVCpu, iMemMap));
6806
6807 /* If it's bounce buffered, we may need to write back the buffer. */
6808 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6809 {
6810 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6811 {
6812 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6813 if (rcStrict == VINF_SUCCESS)
6814 return;
6815 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6816 }
6817 }
6818 /* Otherwise unlock it. */
6819 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6820 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6821
6822 /* Free the entry. */
6823 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6824 Assert(pVCpu->iem.s.cActiveMappings != 0);
6825 pVCpu->iem.s.cActiveMappings--;
6826}
6827
6828#endif /* IEM_WITH_SETJMP */
6829
6830#ifndef IN_RING3
6831/**
6832 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6833 * buffer part shows trouble, the write will be postponed to ring-3 (sets FF and stuff).
6834 *
6835 * Allows the instruction to be completed and retired, while the IEM user will
6836 * return to ring-3 immediately afterwards and do the postponed writes there.
6837 *
6838 * @returns VBox status code (no strict statuses). Caller must check
6839 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6840 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6841 * @param pvMem The mapping.
6842 * @param fAccess The kind of access.
6843 */
6844VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6845{
6846 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6847 AssertReturn(iMemMap >= 0, iMemMap);
6848
6849 /* If it's bounce buffered, we may need to write back the buffer. */
6850 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6851 {
6852 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6853 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6854 }
6855 /* Otherwise unlock it. */
6856 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6857 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6858
6859 /* Free the entry. */
6860 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6861 Assert(pVCpu->iem.s.cActiveMappings != 0);
6862 pVCpu->iem.s.cActiveMappings--;
6863 return VINF_SUCCESS;
6864}
6865#endif
6866
6867
6868/**
6869 * Rolls back mappings, releasing page locks and such.
6870 *
6871 * The caller shall only call this after checking cActiveMappings.
6872 *
6873 * @param pVCpu The cross context virtual CPU structure of the calling thread.
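 *
 * @note Nothing is written back here: bounce buffered data is simply discarded,
 *       and only entries actually holding a PGM page mapping lock (neither
 *       IEM_ACCESS_BOUNCE_BUFFERED nor IEM_ACCESS_NOT_LOCKED) release that lock.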
6874 */
6875void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6876{
6877 Assert(pVCpu->iem.s.cActiveMappings > 0);
6878
6879 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6880 while (iMemMap-- > 0)
6881 {
6882 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6883 if (fAccess != IEM_ACCESS_INVALID)
6884 {
6885 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6886 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6887 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6888 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6889 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6890 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6891 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6892 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6893 pVCpu->iem.s.cActiveMappings--;
6894 }
6895 }
6896}
6897
6898
6899/**
6900 * Fetches a data byte.
6901 *
6902 * @returns Strict VBox status code.
6903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6904 * @param pu8Dst Where to return the byte.
6905 * @param iSegReg The index of the segment register to use for
6906 * this access. The base and limits are checked.
6907 * @param GCPtrMem The address of the guest memory.
6908 */
6909VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6910{
6911 /* The lazy approach for now... */
6912 uint8_t const *pu8Src;
6913 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6914 if (rc == VINF_SUCCESS)
6915 {
6916 *pu8Dst = *pu8Src;
6917 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6918 }
6919 return rc;
6920}
6921
6922
6923#ifdef IEM_WITH_SETJMP
6924/**
6925 * Fetches a data byte, longjmp on error.
6926 *
6927 * @returns The byte.
6928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6929 * @param iSegReg The index of the segment register to use for
6930 * this access. The base and limits are checked.
6931 * @param GCPtrMem The address of the guest memory.
6932 */
6933uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6934{
6935 /* The lazy approach for now... */
6936 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6937 uint8_t const bRet = *pu8Src;
6938 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6939 return bRet;
6940}
6941#endif /* IEM_WITH_SETJMP */
6942
6943
6944/**
6945 * Fetches a data word.
6946 *
6947 * @returns Strict VBox status code.
6948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6949 * @param pu16Dst Where to return the word.
6950 * @param iSegReg The index of the segment register to use for
6951 * this access. The base and limits are checked.
6952 * @param GCPtrMem The address of the guest memory.
6953 */
6954VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6955{
6956 /* The lazy approach for now... */
6957 uint16_t const *pu16Src;
6958 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
6959 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
6960 if (rc == VINF_SUCCESS)
6961 {
6962 *pu16Dst = *pu16Src;
6963 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6964 }
6965 return rc;
6966}
6967
6968
6969#ifdef IEM_WITH_SETJMP
6970/**
6971 * Fetches a data word, longjmp on error.
6972 *
6973 * @returns The word
6974 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6975 * @param iSegReg The index of the segment register to use for
6976 * this access. The base and limits are checked.
6977 * @param GCPtrMem The address of the guest memory.
6978 */
6979uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6980{
6981 /* The lazy approach for now... */
6982 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6983 sizeof(*pu16Src) - 1);
6984 uint16_t const u16Ret = *pu16Src;
6985 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6986 return u16Ret;
6987}
6988#endif
6989
6990
6991/**
6992 * Fetches a data dword.
6993 *
6994 * @returns Strict VBox status code.
6995 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6996 * @param pu32Dst Where to return the dword.
6997 * @param iSegReg The index of the segment register to use for
6998 * this access. The base and limits are checked.
6999 * @param GCPtrMem The address of the guest memory.
7000 */
7001VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7002{
7003 /* The lazy approach for now... */
7004 uint32_t const *pu32Src;
7005 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
7006 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7007 if (rc == VINF_SUCCESS)
7008 {
7009 *pu32Dst = *pu32Src;
7010 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7011 }
7012 return rc;
7013}
7014
7015
7016/**
7017 * Fetches a data dword and zero extends it to a qword.
7018 *
7019 * @returns Strict VBox status code.
7020 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7021 * @param pu64Dst Where to return the qword.
7022 * @param iSegReg The index of the segment register to use for
7023 * this access. The base and limits are checked.
7024 * @param GCPtrMem The address of the guest memory.
7025 */
7026VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7027{
7028 /* The lazy approach for now... */
7029 uint32_t const *pu32Src;
7030 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
7031 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7032 if (rc == VINF_SUCCESS)
7033 {
7034 *pu64Dst = *pu32Src;
7035 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7036 }
7037 return rc;
7038}
7039
7040
7041#ifdef IEM_WITH_SETJMP
7042
7043/**
7044 * Fetches a data dword, longjmp on error, fallback/safe version.
7045 *
7046 * @returns The dword
7047 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7048 * @param iSegReg The index of the segment register to use for
7049 * this access. The base and limits are checked.
7050 * @param GCPtrMem The address of the guest memory.
7051 */
7052uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7053{
7054 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7055 sizeof(*pu32Src) - 1);
7056 uint32_t const u32Ret = *pu32Src;
7057 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7058 return u32Ret;
7059}
7060
7061
7062/**
7063 * Fetches a data dword, longjmp on error.
7064 *
7065 * @returns The dword
7066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7067 * @param iSegReg The index of the segment register to use for
7068 * this access. The base and limits are checked.
7069 * @param GCPtrMem The address of the guest memory.
7070 */
7071uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7072{
7073# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
7074 /*
7075 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
7076 */
7077 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
7078 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
7079 {
7080 /*
7081 * TLB lookup.
7082 */
7083 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
7084 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
7085 if (pTlbe->uTag == uTag)
7086 {
7087 /*
7088 * Check TLB page table level access flags.
7089 */
7090 uint64_t const fNoUser = IEM_GET_CPL(pVCpu) == 3 ? IEMTLBE_F_PT_NO_USER : 0;
7091 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
7092 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
7093 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7094 {
7095 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
7096
7097 /*
7098 * Alignment check:
7099 */
7100 /** @todo check priority \#AC vs \#PF */
7101 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
7102 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7103 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
7104 || IEM_GET_CPL(pVCpu) != 3)
7105 {
7106 /*
7107 * Fetch and return the dword
7108 */
7109 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
7110 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
7111 return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
7112 }
7113 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
7114 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7115 }
7116 }
7117 }
7118
7119 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
7120 outdated page pointer, or other troubles. */
7121 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
7122 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
7123
7124# else
7125 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
7126 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7127 uint32_t const u32Ret = *pu32Src;
7128 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7129 return u32Ret;
7130# endif
7131}
7132#endif
7133
7134
7135#ifdef SOME_UNUSED_FUNCTION
7136/**
7137 * Fetches a data dword and sign extends it to a qword.
7138 *
7139 * @returns Strict VBox status code.
7140 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7141 * @param pu64Dst Where to return the sign extended value.
7142 * @param iSegReg The index of the segment register to use for
7143 * this access. The base and limits are checked.
7144 * @param GCPtrMem The address of the guest memory.
7145 */
7146VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7147{
7148 /* The lazy approach for now... */
7149 int32_t const *pi32Src;
7150 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
7151 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7152 if (rc == VINF_SUCCESS)
7153 {
7154 *pu64Dst = *pi32Src;
7155 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7156 }
7157#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7158 else
7159 *pu64Dst = 0;
7160#endif
7161 return rc;
7162}
7163#endif
7164
7165
7166/**
7167 * Fetches a data qword.
7168 *
7169 * @returns Strict VBox status code.
7170 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7171 * @param pu64Dst Where to return the qword.
7172 * @param iSegReg The index of the segment register to use for
7173 * this access. The base and limits are checked.
7174 * @param GCPtrMem The address of the guest memory.
7175 */
7176VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7177{
7178 /* The lazy approach for now... */
7179 uint64_t const *pu64Src;
7180 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7181 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7182 if (rc == VINF_SUCCESS)
7183 {
7184 *pu64Dst = *pu64Src;
7185 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7186 }
7187 return rc;
7188}
7189
7190
7191#ifdef IEM_WITH_SETJMP
7192/**
7193 * Fetches a data qword, longjmp on error.
7194 *
7195 * @returns The qword.
7196 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7197 * @param iSegReg The index of the segment register to use for
7198 * this access. The base and limits are checked.
7199 * @param GCPtrMem The address of the guest memory.
7200 */
7201uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7202{
7203 /* The lazy approach for now... */
7204 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
7205 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7206 uint64_t const u64Ret = *pu64Src;
7207 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7208 return u64Ret;
7209}
7210#endif
7211
7212
7213/**
7214 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7215 *
7216 * @returns Strict VBox status code.
7217 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7218 * @param pu64Dst Where to return the qword.
7219 * @param iSegReg The index of the segment register to use for
7220 * this access. The base and limits are checked.
7221 * @param GCPtrMem The address of the guest memory.
7222 */
7223VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7224{
7225 /* The lazy approach for now... */
7226 uint64_t const *pu64Src;
7227 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7228 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7229 if (rc == VINF_SUCCESS)
7230 {
7231 *pu64Dst = *pu64Src;
7232 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7233 }
7234 return rc;
7235}
7236
7237
7238#ifdef IEM_WITH_SETJMP
7239/**
7240 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
7241 *
7242 * @returns The qword.
7243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7244 * @param iSegReg The index of the segment register to use for
7245 * this access. The base and limits are checked.
7246 * @param GCPtrMem The address of the guest memory.
7247 */
7248uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7249{
7250 /* The lazy approach for now... */
7251 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7252 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7253 uint64_t const u64Ret = *pu64Src;
7254 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7255 return u64Ret;
7256}
7257#endif
7258
7259
7260/**
7261 * Fetches a data tword.
7262 *
7263 * @returns Strict VBox status code.
7264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7265 * @param pr80Dst Where to return the tword.
7266 * @param iSegReg The index of the segment register to use for
7267 * this access. The base and limits are checked.
7268 * @param GCPtrMem The address of the guest memory.
7269 */
7270VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7271{
7272 /* The lazy approach for now... */
7273 PCRTFLOAT80U pr80Src;
7274 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7275 if (rc == VINF_SUCCESS)
7276 {
7277 *pr80Dst = *pr80Src;
7278 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7279 }
7280 return rc;
7281}
7282
7283
7284#ifdef IEM_WITH_SETJMP
7285/**
7286 * Fetches a data tword, longjmp on error.
7287 *
7288 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7289 * @param pr80Dst Where to return the tword.
7290 * @param iSegReg The index of the segment register to use for
7291 * this access. The base and limits are checked.
7292 * @param GCPtrMem The address of the guest memory.
7293 */
7294void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7295{
7296 /* The lazy approach for now... */
7297 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7298 *pr80Dst = *pr80Src;
7299 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7300}
7301#endif
7302
7303
7304/**
7305 * Fetches a data decimal tword.
7306 *
7307 * @returns Strict VBox status code.
7308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7309 * @param pd80Dst Where to return the tword.
7310 * @param iSegReg The index of the segment register to use for
7311 * this access. The base and limits are checked.
7312 * @param GCPtrMem The address of the guest memory.
7313 */
7314VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7315{
7316 /* The lazy approach for now... */
7317 PCRTPBCD80U pd80Src;
7318 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7319 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7320 if (rc == VINF_SUCCESS)
7321 {
7322 *pd80Dst = *pd80Src;
7323 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7324 }
7325 return rc;
7326}
7327
7328
7329#ifdef IEM_WITH_SETJMP
7330/**
7331 * Fetches a data decimal tword, longjmp on error.
7332 *
7333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7334 * @param pd80Dst Where to return the tword.
7335 * @param iSegReg The index of the segment register to use for
7336 * this access. The base and limits are checked.
7337 * @param GCPtrMem The address of the guest memory.
7338 */
7339void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7340{
7341 /* The lazy approach for now... */
7342 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7343 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7344 *pd80Dst = *pd80Src;
7345 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7346}
7347#endif
7348
7349
7350/**
7351 * Fetches a data dqword (double qword), generally SSE related.
7352 *
7353 * @returns Strict VBox status code.
7354 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7355 * @param pu128Dst Where to return the dqword.
7356 * @param iSegReg The index of the segment register to use for
7357 * this access. The base and limits are checked.
7358 * @param GCPtrMem The address of the guest memory.
7359 */
7360VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7361{
7362 /* The lazy approach for now... */
7363 PCRTUINT128U pu128Src;
7364 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7365 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7366 if (rc == VINF_SUCCESS)
7367 {
7368 pu128Dst->au64[0] = pu128Src->au64[0];
7369 pu128Dst->au64[1] = pu128Src->au64[1];
7370 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7371 }
7372 return rc;
7373}
7374
7375
7376#ifdef IEM_WITH_SETJMP
7377/**
7378 * Fetches a data dqword (double qword), generally SSE related.
7379 *
7380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7381 * @param pu128Dst Where to return the dqword.
7382 * @param iSegReg The index of the segment register to use for
7383 * this access. The base and limits are checked.
7384 * @param GCPtrMem The address of the guest memory.
7385 */
7386void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7387{
7388 /* The lazy approach for now... */
7389 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7390 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7391 pu128Dst->au64[0] = pu128Src->au64[0];
7392 pu128Dst->au64[1] = pu128Src->au64[1];
7393 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7394}
7395#endif
7396
7397
7398/**
7399 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7400 * related.
7401 *
7402 * Raises \#GP(0) if not aligned.
7403 *
7404 * @returns Strict VBox status code.
7405 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7406 * @param pu128Dst Where to return the dqword.
7407 * @param iSegReg The index of the segment register to use for
7408 * this access. The base and limits are checked.
7409 * @param GCPtrMem The address of the guest memory.
7410 */
7411VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7412{
7413 /* The lazy approach for now... */
7414 PCRTUINT128U pu128Src;
7415 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7416 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7417 if (rc == VINF_SUCCESS)
7418 {
7419 pu128Dst->au64[0] = pu128Src->au64[0];
7420 pu128Dst->au64[1] = pu128Src->au64[1];
7421 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7422 }
7423 return rc;
7424}
7425
7426
7427#ifdef IEM_WITH_SETJMP
7428/**
7429 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7430 * related, longjmp on error.
7431 *
7432 * Raises \#GP(0) if not aligned.
7433 *
7434 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7435 * @param pu128Dst Where to return the dqword.
7436 * @param iSegReg The index of the segment register to use for
7437 * this access. The base and limits are checked.
7438 * @param GCPtrMem The address of the guest memory.
7439 */
7440void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7441 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7442{
7443 /* The lazy approach for now... */
7444 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7445 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7446 pu128Dst->au64[0] = pu128Src->au64[0];
7447 pu128Dst->au64[1] = pu128Src->au64[1];
7448 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7449}
7450#endif
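/*
 * Illustrative usage sketch (hypothetical caller, not taken from this file):
 * a MOVAPS-style load would go through the aligned variant so that a
 * misaligned effective address raises #GP(0), while a MOVUPS-style load
 * would use iemMemFetchDataU128 instead.  GCPtrEffSrc and uSrc are assumed
 * names.
 *
 *     RTUINT128U uSrc;
 *     VBOXSTRICTRC rcStrict = iemMemFetchDataU128AlignedSse(pVCpu, &uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;   // #GP(0) on misalignment, #PF, access handler statuses, ...
 */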
7451
7452
7453/**
7454 * Fetches a data oword (octo word), generally AVX related.
7455 *
7456 * @returns Strict VBox status code.
7457 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7458 * @param pu256Dst Where to return the oword.
7459 * @param iSegReg The index of the segment register to use for
7460 * this access. The base and limits are checked.
7461 * @param GCPtrMem The address of the guest memory.
7462 */
7463VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7464{
7465 /* The lazy approach for now... */
7466 PCRTUINT256U pu256Src;
7467 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7468 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7469 if (rc == VINF_SUCCESS)
7470 {
7471 pu256Dst->au64[0] = pu256Src->au64[0];
7472 pu256Dst->au64[1] = pu256Src->au64[1];
7473 pu256Dst->au64[2] = pu256Src->au64[2];
7474 pu256Dst->au64[3] = pu256Src->au64[3];
7475 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7476 }
7477 return rc;
7478}
7479
7480
7481#ifdef IEM_WITH_SETJMP
7482/**
7483 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
7484 *
7485 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7486 * @param pu256Dst Where to return the oword.
7487 * @param iSegReg The index of the segment register to use for
7488 * this access. The base and limits are checked.
7489 * @param GCPtrMem The address of the guest memory.
7490 */
7491void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7492{
7493 /* The lazy approach for now... */
7494 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7495 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7496 pu256Dst->au64[0] = pu256Src->au64[0];
7497 pu256Dst->au64[1] = pu256Src->au64[1];
7498 pu256Dst->au64[2] = pu256Src->au64[2];
7499 pu256Dst->au64[3] = pu256Src->au64[3];
7500 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7501}
7502#endif
7503
7504
7505/**
7506 * Fetches a data oword (octo word) at an aligned address, generally AVX
7507 * related.
7508 *
7509 * Raises \#GP(0) if not aligned.
7510 *
7511 * @returns Strict VBox status code.
7512 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7513 * @param pu256Dst Where to return the oword.
7514 * @param iSegReg The index of the segment register to use for
7515 * this access. The base and limits are checked.
7516 * @param GCPtrMem The address of the guest memory.
7517 */
7518VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7519{
7520 /* The lazy approach for now... */
7521 PCRTUINT256U pu256Src;
7522 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7523 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7524 if (rc == VINF_SUCCESS)
7525 {
7526 pu256Dst->au64[0] = pu256Src->au64[0];
7527 pu256Dst->au64[1] = pu256Src->au64[1];
7528 pu256Dst->au64[2] = pu256Src->au64[2];
7529 pu256Dst->au64[3] = pu256Src->au64[3];
7530 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7531 }
7532 return rc;
7533}
7534
7535
7536#ifdef IEM_WITH_SETJMP
7537/**
7538 * Fetches a data oword (octo word) at an aligned address, generally AVX
7539 * related, longjmp on error.
7540 *
7541 * Raises \#GP(0) if not aligned.
7542 *
7543 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7544 * @param pu256Dst Where to return the oword.
7545 * @param iSegReg The index of the segment register to use for
7546 * this access. The base and limits are checked.
7547 * @param GCPtrMem The address of the guest memory.
7548 */
7549void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7550 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7551{
7552 /* The lazy approach for now... */
7553 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7554 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7555 pu256Dst->au64[0] = pu256Src->au64[0];
7556 pu256Dst->au64[1] = pu256Src->au64[1];
7557 pu256Dst->au64[2] = pu256Src->au64[2];
7558 pu256Dst->au64[3] = pu256Src->au64[3];
7559 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7560}
7561#endif
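/*
 * Illustrative usage sketch (hypothetical caller, not taken from this file):
 * a VMOVUPS-style 256-bit load would use the unaligned helper, whereas a
 * VMOVAPS-style load would use the aligned variant above, whose
 * (sizeof - 1) mask requests 32-byte alignment.  GCPtrEffSrc and uSrc are
 * assumed names.
 *
 *     RTUINT256U uSrc;
 *     VBOXSTRICTRC rcStrict = iemMemFetchDataU256(pVCpu, &uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */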
7562
7563
7564
7565/**
7566 * Fetches a descriptor register (lgdt, lidt).
7567 *
7568 * @returns Strict VBox status code.
7569 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7570 * @param pcbLimit Where to return the limit.
7571 * @param pGCPtrBase Where to return the base.
7572 * @param iSegReg The index of the segment register to use for
7573 * this access. The base and limits are checked.
7574 * @param GCPtrMem The address of the guest memory.
7575 * @param enmOpSize The effective operand size.
7576 */
7577VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7578 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7579{
7580 /*
7581 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7582 * little special:
7583 * - The two reads are done separately.
7584 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7585 * - We suspect the 386 to actually commit the limit before the base in
7586 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7587 * don't try to emulate this eccentric behavior, because it's not well
7588 * enough understood and rather hard to trigger.
7589 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7590 */
7591 VBOXSTRICTRC rcStrict;
7592 if (IEM_IS_64BIT_CODE(pVCpu))
7593 {
7594 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7595 if (rcStrict == VINF_SUCCESS)
7596 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7597 }
7598 else
7599 {
7600 uint32_t uTmp = 0; /* (Silence a Visual C++ "maybe used uninitialized" warning.) */
7601 if (enmOpSize == IEMMODE_32BIT)
7602 {
7603 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7604 {
7605 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7606 if (rcStrict == VINF_SUCCESS)
7607 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7608 }
7609 else
7610 {
7611 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7612 if (rcStrict == VINF_SUCCESS)
7613 {
7614 *pcbLimit = (uint16_t)uTmp;
7615 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7616 }
7617 }
7618 if (rcStrict == VINF_SUCCESS)
7619 *pGCPtrBase = uTmp;
7620 }
7621 else
7622 {
7623 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7624 if (rcStrict == VINF_SUCCESS)
7625 {
7626 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7627 if (rcStrict == VINF_SUCCESS)
7628 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7629 }
7630 }
7631 }
7632 return rcStrict;
7633}
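/*
 * Worked example (illustrative only, derived from the code above): for LGDT
 * with a 16-bit operand size outside 64-bit mode, the limit word is read
 * first and the dword at GCPtrMem+2 is then masked with 0x00ffffff.  Given
 * the memory bytes FF FF 78 56 34 12, the result is cbLimit=0xFFFF and
 * GCPtrBase=0x345678; the 0x12 byte is fetched but discarded.
 */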
7634
7635
7636
7637/**
7638 * Stores a data byte.
7639 *
7640 * @returns Strict VBox status code.
7641 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7642 * @param iSegReg The index of the segment register to use for
7643 * this access. The base and limits are checked.
7644 * @param GCPtrMem The address of the guest memory.
7645 * @param u8Value The value to store.
7646 */
7647VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7648{
7649 /* The lazy approach for now... */
7650 uint8_t *pu8Dst;
7651 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7652 if (rc == VINF_SUCCESS)
7653 {
7654 *pu8Dst = u8Value;
7655 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7656 }
7657 return rc;
7658}
7659
7660
7661#ifdef IEM_WITH_SETJMP
7662/**
7663 * Stores a data byte, longjmp on error.
7664 *
7665 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7666 * @param iSegReg The index of the segment register to use for
7667 * this access. The base and limits are checked.
7668 * @param GCPtrMem The address of the guest memory.
7669 * @param u8Value The value to store.
7670 */
7671void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP
7672{
7673 /* The lazy approach for now... */
7674 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7675 *pu8Dst = u8Value;
7676 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7677}
7678#endif
7679
7680
7681/**
7682 * Stores a data word.
7683 *
7684 * @returns Strict VBox status code.
7685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7686 * @param iSegReg The index of the segment register to use for
7687 * this access. The base and limits are checked.
7688 * @param GCPtrMem The address of the guest memory.
7689 * @param u16Value The value to store.
7690 */
7691VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7692{
7693 /* The lazy approach for now... */
7694 uint16_t *pu16Dst;
7695 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7696 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7697 if (rc == VINF_SUCCESS)
7698 {
7699 *pu16Dst = u16Value;
7700 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7701 }
7702 return rc;
7703}
7704
7705
7706#ifdef IEM_WITH_SETJMP
7707/**
7708 * Stores a data word, longjmp on error.
7709 *
7710 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7711 * @param iSegReg The index of the segment register to use for
7712 * this access. The base and limits are checked.
7713 * @param GCPtrMem The address of the guest memory.
7714 * @param u16Value The value to store.
7715 */
7716void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP
7717{
7718 /* The lazy approach for now... */
7719 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7720 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7721 *pu16Dst = u16Value;
7722 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7723}
7724#endif
7725
7726
7727/**
7728 * Stores a data dword.
7729 *
7730 * @returns Strict VBox status code.
7731 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7732 * @param iSegReg The index of the segment register to use for
7733 * this access. The base and limits are checked.
7734 * @param GCPtrMem The address of the guest memory.
7735 * @param u32Value The value to store.
7736 */
7737VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7738{
7739 /* The lazy approach for now... */
7740 uint32_t *pu32Dst;
7741 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7742 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7743 if (rc == VINF_SUCCESS)
7744 {
7745 *pu32Dst = u32Value;
7746 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7747 }
7748 return rc;
7749}
7750
7751
7752#ifdef IEM_WITH_SETJMP
7753/**
7754 * Stores a data dword, longjmp on error.
7755 *
7757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7758 * @param iSegReg The index of the segment register to use for
7759 * this access. The base and limits are checked.
7760 * @param GCPtrMem The address of the guest memory.
7761 * @param u32Value The value to store.
7762 */
7763void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP
7764{
7765 /* The lazy approach for now... */
7766 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7767 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7768 *pu32Dst = u32Value;
7769 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7770}
7771#endif
7772
7773
7774/**
7775 * Stores a data qword.
7776 *
7777 * @returns Strict VBox status code.
7778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7779 * @param iSegReg The index of the segment register to use for
7780 * this access. The base and limits are checked.
7781 * @param GCPtrMem The address of the guest memory.
7782 * @param u64Value The value to store.
7783 */
7784VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7785{
7786 /* The lazy approach for now... */
7787 uint64_t *pu64Dst;
7788 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7789 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7790 if (rc == VINF_SUCCESS)
7791 {
7792 *pu64Dst = u64Value;
7793 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7794 }
7795 return rc;
7796}
7797
7798
7799#ifdef IEM_WITH_SETJMP
7800/**
7801 * Stores a data qword, longjmp on error.
7802 *
7803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7804 * @param iSegReg The index of the segment register to use for
7805 * this access. The base and limits are checked.
7806 * @param GCPtrMem The address of the guest memory.
7807 * @param u64Value The value to store.
7808 */
7809void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP
7810{
7811 /* The lazy approach for now... */
7812 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7813 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7814 *pu64Dst = u64Value;
7815 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7816}
7817#endif
7818
7819
7820/**
7821 * Stores a data dqword.
7822 *
7823 * @returns Strict VBox status code.
7824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7825 * @param iSegReg The index of the segment register to use for
7826 * this access. The base and limits are checked.
7827 * @param GCPtrMem The address of the guest memory.
7828 * @param u128Value The value to store.
7829 */
7830VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7831{
7832 /* The lazy approach for now... */
7833 PRTUINT128U pu128Dst;
7834 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7835 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7836 if (rc == VINF_SUCCESS)
7837 {
7838 pu128Dst->au64[0] = u128Value.au64[0];
7839 pu128Dst->au64[1] = u128Value.au64[1];
7840 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7841 }
7842 return rc;
7843}
7844
7845
7846#ifdef IEM_WITH_SETJMP
7847/**
7848 * Stores a data dqword, longjmp on error.
7849 *
7850 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7851 * @param iSegReg The index of the segment register to use for
7852 * this access. The base and limits are checked.
7853 * @param GCPtrMem The address of the guest memory.
7854 * @param u128Value The value to store.
7855 */
7856void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7857{
7858 /* The lazy approach for now... */
7859 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7860 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7861 pu128Dst->au64[0] = u128Value.au64[0];
7862 pu128Dst->au64[1] = u128Value.au64[1];
7863 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7864}
7865#endif
7866
7867
7868/**
7869 * Stores a data dqword, SSE aligned.
7870 *
7871 * @returns Strict VBox status code.
7872 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7873 * @param iSegReg The index of the segment register to use for
7874 * this access. The base and limits are checked.
7875 * @param GCPtrMem The address of the guest memory.
7876 * @param u128Value The value to store.
7877 */
7878VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7879{
7880 /* The lazy approach for now... */
7881 PRTUINT128U pu128Dst;
7882 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7883 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7884 if (rc == VINF_SUCCESS)
7885 {
7886 pu128Dst->au64[0] = u128Value.au64[0];
7887 pu128Dst->au64[1] = u128Value.au64[1];
7888 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7889 }
7890 return rc;
7891}
7892
7893
7894#ifdef IEM_WITH_SETJMP
7895/**
7896 * Stores a data dqword, SSE aligned, longjmp on error.
7897 *
7899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7900 * @param iSegReg The index of the segment register to use for
7901 * this access. The base and limits are checked.
7902 * @param GCPtrMem The address of the guest memory.
7903 * @param u128Value The value to store.
7904 */
7905void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7906 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7907{
7908 /* The lazy approach for now... */
7909 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7910 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7911 pu128Dst->au64[0] = u128Value.au64[0];
7912 pu128Dst->au64[1] = u128Value.au64[1];
7913 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7914}
7915#endif
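/*
 * Illustrative usage sketch (hypothetical caller, not taken from this file):
 * a MOVAPS-style store forwards the XMM value through the aligned helper so
 * that a misaligned destination raises #GP(0) before anything is written.
 * uSrc and GCPtrEffDst are assumed names.
 *
 *     VBOXSTRICTRC rcStrict = iemMemStoreDataU128AlignedSse(pVCpu, pVCpu->iem.s.iEffSeg, GCPtrEffDst, uSrc);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */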
7916
7917
7918/**
7919 * Stores a data oword (octo word).
7920 *
7921 * @returns Strict VBox status code.
7922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7923 * @param iSegReg The index of the segment register to use for
7924 * this access. The base and limits are checked.
7925 * @param GCPtrMem The address of the guest memory.
7926 * @param pu256Value Pointer to the value to store.
7927 */
7928VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7929{
7930 /* The lazy approach for now... */
7931 PRTUINT256U pu256Dst;
7932 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7933 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7934 if (rc == VINF_SUCCESS)
7935 {
7936 pu256Dst->au64[0] = pu256Value->au64[0];
7937 pu256Dst->au64[1] = pu256Value->au64[1];
7938 pu256Dst->au64[2] = pu256Value->au64[2];
7939 pu256Dst->au64[3] = pu256Value->au64[3];
7940 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7941 }
7942 return rc;
7943}
7944
7945
7946#ifdef IEM_WITH_SETJMP
7947/**
7948 * Stores a data oword (octo word), longjmp on error.
7949 *
7950 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7951 * @param iSegReg The index of the segment register to use for
7952 * this access. The base and limits are checked.
7953 * @param GCPtrMem The address of the guest memory.
7954 * @param pu256Value Pointer to the value to store.
7955 */
7956void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7957{
7958 /* The lazy approach for now... */
7959 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7960 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7961 pu256Dst->au64[0] = pu256Value->au64[0];
7962 pu256Dst->au64[1] = pu256Value->au64[1];
7963 pu256Dst->au64[2] = pu256Value->au64[2];
7964 pu256Dst->au64[3] = pu256Value->au64[3];
7965 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7966}
7967#endif
7968
7969
7970/**
7971 * Stores a data oword (octo word), AVX \#GP(0) aligned.
7972 *
7973 * @returns Strict VBox status code.
7974 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7975 * @param iSegReg The index of the segment register to use for
7976 * this access. The base and limits are checked.
7977 * @param GCPtrMem The address of the guest memory.
7978 * @param pu256Value Pointer to the value to store.
7979 */
7980VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7981{
7982 /* The lazy approach for now... */
7983 PRTUINT256U pu256Dst;
7984 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7985 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7986 if (rc == VINF_SUCCESS)
7987 {
7988 pu256Dst->au64[0] = pu256Value->au64[0];
7989 pu256Dst->au64[1] = pu256Value->au64[1];
7990 pu256Dst->au64[2] = pu256Value->au64[2];
7991 pu256Dst->au64[3] = pu256Value->au64[3];
7992 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7993 }
7994 return rc;
7995}
7996
7997
7998#ifdef IEM_WITH_SETJMP
7999/**
8000 * Stores a data oword (octo word), AVX aligned, longjmp on error.
8001 *
8003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8004 * @param iSegReg The index of the segment register to use for
8005 * this access. The base and limits are checked.
8006 * @param GCPtrMem The address of the guest memory.
8007 * @param pu256Value Pointer to the value to store.
8008 */
8009void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
8010 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
8011{
8012 /* The lazy approach for now... */
8013 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
8014 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
8015 pu256Dst->au64[0] = pu256Value->au64[0];
8016 pu256Dst->au64[1] = pu256Value->au64[1];
8017 pu256Dst->au64[2] = pu256Value->au64[2];
8018 pu256Dst->au64[3] = pu256Value->au64[3];
8019 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
8020}
8021#endif
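/*
 * Note restating the alignment request above: (sizeof(*pu256Dst) - 1) is 31,
 * so together with IEM_MEMMAP_F_ALIGN_GP the mapping raises #GP(0) unless
 * the effective address is 32-byte aligned; the SSE-specific
 * IEM_MEMMAP_F_ALIGN_SSE flag is not passed here.  A hypothetical
 * VMOVAPS-style caller (GCPtrEffDst and uSrc are assumed names):
 *
 *     VBOXSTRICTRC rcStrict = iemMemStoreDataU256AlignedAvx(pVCpu, pVCpu->iem.s.iEffSeg, GCPtrEffDst, &uSrc);
 */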
8022
8023
8024/**
8025 * Stores a descriptor register (sgdt, sidt).
8026 *
8027 * @returns Strict VBox status code.
8028 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8029 * @param cbLimit The limit.
8030 * @param GCPtrBase The base address.
8031 * @param iSegReg The index of the segment register to use for
8032 * this access. The base and limits are checked.
8033 * @param GCPtrMem The address of the guest memory.
8034 */
8035VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8036{
8037 /*
8038 * The SIDT and SGDT instructions actually store the data using two
8039 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
8040 * do not respond to opsize prefixes.
8041 */
8042 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
8043 if (rcStrict == VINF_SUCCESS)
8044 {
8045 if (IEM_IS_16BIT_CODE(pVCpu))
8046 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
8047 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
8048 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
8049 else if (IEM_IS_32BIT_CODE(pVCpu))
8050 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
8051 else
8052 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
8053 }
8054 return rcStrict;
8055}
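/*
 * Worked example (illustrative only, derived from the code above): SGDT in
 * 16-bit code stores the 16-bit limit followed by a 32-bit base dword; on a
 * 286-class or older target the top base byte is forced to 0xFF.  With
 * cbLimit=0xFFFF and GCPtrBase=0x123456 the bytes written are
 * FF FF 56 34 12 FF.
 */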
8056
8057
8058/**
8059 * Pushes a word onto the stack.
8060 *
8061 * @returns Strict VBox status code.
8062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8063 * @param u16Value The value to push.
8064 */
8065VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
8066{
8067 /* Decrement the stack pointer. */
8068 uint64_t uNewRsp;
8069 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
8070
8071 /* Write the word the lazy way. */
8072 uint16_t *pu16Dst;
8073 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8074 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8075 if (rc == VINF_SUCCESS)
8076 {
8077 *pu16Dst = u16Value;
8078 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8079 }
8080
8081 /* Commit the new RSP value unless an access handler made trouble. */
8082 if (rc == VINF_SUCCESS)
8083 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8084
8085 return rc;
8086}
8087
8088
8089/**
8090 * Pushes a dword onto the stack.
8091 *
8092 * @returns Strict VBox status code.
8093 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8094 * @param u32Value The value to push.
8095 */
8096VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8097{
8098 /* Decrement the stack pointer. */
8099 uint64_t uNewRsp;
8100 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8101
8102 /* Write the dword the lazy way. */
8103 uint32_t *pu32Dst;
8104 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8105 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8106 if (rc == VINF_SUCCESS)
8107 {
8108 *pu32Dst = u32Value;
8109 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8110 }
8111
8112 /* Commit the new RSP value unless an access handler made trouble. */
8113 if (rc == VINF_SUCCESS)
8114 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8115
8116 return rc;
8117}
8118
8119
8120/**
8121 * Pushes a dword segment register value onto the stack.
8122 *
8123 * @returns Strict VBox status code.
8124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8125 * @param u32Value The value to push.
8126 */
8127VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8128{
8129 /* Decrement the stack pointer. */
8130 uint64_t uNewRsp;
8131 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8132
8133 /* The Intel docs talk about zero extending the selector register
8134 value. My actual Intel CPU here might be zero extending the value
8135 but it still only writes the lower word... */
8136 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
8137 * happens when crossing an electric page boundary: is the high word checked
8138 * for write accessibility or not? Probably it is. What about segment limits?
8139 * It appears this behavior is also shared with trap error codes.
8140 *
8141 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
8142 * ancient hardware when it actually did change. */
8143 uint16_t *pu16Dst;
8144 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
8145 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
8146 if (rc == VINF_SUCCESS)
8147 {
8148 *pu16Dst = (uint16_t)u32Value;
8149 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
8150 }
8151
8152 /* Commit the new RSP value unless an access handler made trouble. */
8153 if (rc == VINF_SUCCESS)
8154 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8155
8156 return rc;
8157}
8158
8159
8160/**
8161 * Pushes a qword onto the stack.
8162 *
8163 * @returns Strict VBox status code.
8164 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8165 * @param u64Value The value to push.
8166 */
8167VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
8168{
8169 /* Decrement the stack pointer. */
8170 uint64_t uNewRsp;
8171 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
8172
8173 /* Write the qword the lazy way. */
8174 uint64_t *pu64Dst;
8175 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8176 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8177 if (rc == VINF_SUCCESS)
8178 {
8179 *pu64Dst = u64Value;
8180 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8181 }
8182
8183 /* Commit the new RSP value unless an access handler made trouble. */
8184 if (rc == VINF_SUCCESS)
8185 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8186
8187 return rc;
8188}
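/*
 * Illustrative usage sketch (hypothetical caller, not taken from this file):
 * a 64-bit PUSH simply forwards the value; note that the helpers above only
 * commit the new RSP when the memory write returned VINF_SUCCESS, so a
 * failed or postponed access leaves RSP as it was.  u64Imm is an assumed
 * local.
 *
 *     VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, u64Imm);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */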
8189
8190
8191/**
8192 * Pops a word from the stack.
8193 *
8194 * @returns Strict VBox status code.
8195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8196 * @param pu16Value Where to store the popped value.
8197 */
8198VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
8199{
8200 /* Increment the stack pointer. */
8201 uint64_t uNewRsp;
8202 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
8203
8204 /* Read the word the lazy way. */
8205 uint16_t const *pu16Src;
8206 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8207 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8208 if (rc == VINF_SUCCESS)
8209 {
8210 *pu16Value = *pu16Src;
8211 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8212
8213 /* Commit the new RSP value. */
8214 if (rc == VINF_SUCCESS)
8215 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8216 }
8217
8218 return rc;
8219}
8220
8221
8222/**
8223 * Pops a dword from the stack.
8224 *
8225 * @returns Strict VBox status code.
8226 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8227 * @param pu32Value Where to store the popped value.
8228 */
8229VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
8230{
8231 /* Increment the stack pointer. */
8232 uint64_t uNewRsp;
8233 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
8234
8235 /* Read the dword the lazy way. */
8236 uint32_t const *pu32Src;
8237 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8238 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8239 if (rc == VINF_SUCCESS)
8240 {
8241 *pu32Value = *pu32Src;
8242 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8243
8244 /* Commit the new RSP value. */
8245 if (rc == VINF_SUCCESS)
8246 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8247 }
8248
8249 return rc;
8250}
8251
8252
8253/**
8254 * Pops a qword from the stack.
8255 *
8256 * @returns Strict VBox status code.
8257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8258 * @param pu64Value Where to store the popped value.
8259 */
8260VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
8261{
8262 /* Increment the stack pointer. */
8263 uint64_t uNewRsp;
8264 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
8265
8266 /* Read the qword the lazy way. */
8267 uint64_t const *pu64Src;
8268 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8269 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8270 if (rc == VINF_SUCCESS)
8271 {
8272 *pu64Value = *pu64Src;
8273 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8274
8275 /* Commit the new RSP value. */
8276 if (rc == VINF_SUCCESS)
8277 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8278 }
8279
8280 return rc;
8281}
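/*
 * Illustrative usage sketch (hypothetical caller, not taken from this file):
 * a POP r64 style operation reads the value first and only updates the
 * destination register afterwards, mirroring how the helper itself defers
 * the RSP commit until the read succeeded.
 *
 *     uint64_t u64Value;                           // assumed local
 *     VBOXSTRICTRC rcStrict = iemMemStackPopU64(pVCpu, &u64Value);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     pVCpu->cpum.GstCtx.rax = u64Value;           // e.g. for POP RAX
 */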
8282
8283
8284/**
8285 * Pushes a word onto the stack, using a temporary stack pointer.
8286 *
8287 * @returns Strict VBox status code.
8288 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8289 * @param u16Value The value to push.
8290 * @param pTmpRsp Pointer to the temporary stack pointer.
8291 */
8292VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8293{
8294 /* Decrement the stack pointer. */
8295 RTUINT64U NewRsp = *pTmpRsp;
8296 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
8297
8298 /* Write the word the lazy way. */
8299 uint16_t *pu16Dst;
8300 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8301 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8302 if (rc == VINF_SUCCESS)
8303 {
8304 *pu16Dst = u16Value;
8305 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8306 }
8307
8308 /* Commit the new RSP value unless an access handler made trouble. */
8309 if (rc == VINF_SUCCESS)
8310 *pTmpRsp = NewRsp;
8311
8312 return rc;
8313}
8314
8315
8316/**
8317 * Pushes a dword onto the stack, using a temporary stack pointer.
8318 *
8319 * @returns Strict VBox status code.
8320 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8321 * @param u32Value The value to push.
8322 * @param pTmpRsp Pointer to the temporary stack pointer.
8323 */
8324VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8325{
8326 /* Decrement the stack pointer. */
8327 RTUINT64U NewRsp = *pTmpRsp;
8328 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
8329
8330 /* Write the dword the lazy way. */
8331 uint32_t *pu32Dst;
8332 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8333 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8334 if (rc == VINF_SUCCESS)
8335 {
8336 *pu32Dst = u32Value;
8337 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8338 }
8339
8340 /* Commit the new RSP value unless an access handler made trouble. */
8341 if (rc == VINF_SUCCESS)
8342 *pTmpRsp = NewRsp;
8343
8344 return rc;
8345}
8346
8347
8348/**
8349 * Pushes a qword onto the stack, using a temporary stack pointer.
8350 *
8351 * @returns Strict VBox status code.
8352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8353 * @param u64Value The value to push.
8354 * @param pTmpRsp Pointer to the temporary stack pointer.
8355 */
8356VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8357{
8358 /* Decrement the stack pointer. */
8359 RTUINT64U NewRsp = *pTmpRsp;
8360 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
8361
8362 /* Write the qword the lazy way. */
8363 uint64_t *pu64Dst;
8364 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8365 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8366 if (rc == VINF_SUCCESS)
8367 {
8368 *pu64Dst = u64Value;
8369 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8370 }
8371
8372 /* Commit the new RSP value unless an access handler made trouble. */
8373 if (rc == VINF_SUCCESS)
8374 *pTmpRsp = NewRsp;
8375
8376 return rc;
8377}
8378
8379
8380/**
8381 * Pops a word from the stack, using a temporary stack pointer.
8382 *
8383 * @returns Strict VBox status code.
8384 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8385 * @param pu16Value Where to store the popped value.
8386 * @param pTmpRsp Pointer to the temporary stack pointer.
8387 */
8388VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8389{
8390 /* Increment the stack pointer. */
8391 RTUINT64U NewRsp = *pTmpRsp;
8392 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
8393
8394 /* Read the word the lazy way. */
8395 uint16_t const *pu16Src;
8396 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8397 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8398 if (rc == VINF_SUCCESS)
8399 {
8400 *pu16Value = *pu16Src;
8401 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8402
8403 /* Commit the new RSP value. */
8404 if (rc == VINF_SUCCESS)
8405 *pTmpRsp = NewRsp;
8406 }
8407
8408 return rc;
8409}
8410
8411
8412/**
8413 * Pops a dword from the stack, using a temporary stack pointer.
8414 *
8415 * @returns Strict VBox status code.
8416 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8417 * @param pu32Value Where to store the popped value.
8418 * @param pTmpRsp Pointer to the temporary stack pointer.
8419 */
8420VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8421{
8422 /* Increment the stack pointer. */
8423 RTUINT64U NewRsp = *pTmpRsp;
8424 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
8425
8426 /* Read the dword the lazy way. */
8427 uint32_t const *pu32Src;
8428 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8429 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8430 if (rc == VINF_SUCCESS)
8431 {
8432 *pu32Value = *pu32Src;
8433 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8434
8435 /* Commit the new RSP value. */
8436 if (rc == VINF_SUCCESS)
8437 *pTmpRsp = NewRsp;
8438 }
8439
8440 return rc;
8441}
8442
8443
8444/**
8445 * Pops a qword from the stack, using a temporary stack pointer.
8446 *
8447 * @returns Strict VBox status code.
8448 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8449 * @param pu64Value Where to store the popped value.
8450 * @param pTmpRsp Pointer to the temporary stack pointer.
8451 */
8452VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8453{
8454 /* Increment the stack pointer. */
8455 RTUINT64U NewRsp = *pTmpRsp;
8456 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8457
8458 /* Read the qword the lazy way. */
8459 uint64_t const *pu64Src;
8460 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8461 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8462 if (rcStrict == VINF_SUCCESS)
8463 {
8464 *pu64Value = *pu64Src;
8465 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8466
8467 /* Commit the new RSP value. */
8468 if (rcStrict == VINF_SUCCESS)
8469 *pTmpRsp = NewRsp;
8470 }
8471
8472 return rcStrict;
8473}
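/*
 * Illustrative usage sketch (hypothetical caller, not taken from this file):
 * the Ex variants operate on a caller-provided RSP copy, so a multi-value
 * pop (iret/retf style) can be abandoned without side effects simply by not
 * writing TmpRsp back.  u64Rip and u64Cs are assumed locals.
 *
 *     RTUINT64U TmpRsp;
 *     TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
 *     uint64_t u64Rip, u64Cs;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Rip, &TmpRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *         rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Cs, &TmpRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;                         // guest RSP not yet touched
 *     pVCpu->cpum.GstCtx.rsp = TmpRsp.u;           // commit only on success
 */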
8474
8475
8476/**
8477 * Begin a special stack push (used by interrupts, exceptions and such).
8478 *
8479 * This will raise \#SS or \#PF if appropriate.
8480 *
8481 * @returns Strict VBox status code.
8482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8483 * @param cbMem The number of bytes to push onto the stack.
8484 * @param cbAlign The alignment mask (7, 3, 1).
8485 * @param ppvMem Where to return the pointer to the stack memory.
8486 * As with the other memory functions this could be
8487 * direct access or bounce buffered access, so
8488 * don't commit register until the commit call
8489 * succeeds.
8490 * @param puNewRsp Where to return the new RSP value. This must be
8491 * passed unchanged to
8492 * iemMemStackPushCommitSpecial().
8493 */
8494VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8495 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8496{
8497 Assert(cbMem < UINT8_MAX);
8498 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8499 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
8500 IEM_ACCESS_STACK_W, cbAlign);
8501}
8502
8503
8504/**
8505 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8506 *
8507 * This will update the rSP.
8508 *
8509 * @returns Strict VBox status code.
8510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8511 * @param pvMem The pointer returned by
8512 * iemMemStackPushBeginSpecial().
8513 * @param uNewRsp The new RSP value returned by
8514 * iemMemStackPushBeginSpecial().
8515 */
8516VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8517{
8518 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8519 if (rcStrict == VINF_SUCCESS)
8520 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8521 return rcStrict;
8522}
8523
8524
8525/**
8526 * Begin a special stack pop (used by iret, retf and such).
8527 *
8528 * This will raise \#SS or \#PF if appropriate.
8529 *
8530 * @returns Strict VBox status code.
8531 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8532 * @param cbMem The number of bytes to pop from the stack.
8533 * @param cbAlign The alignment mask (7, 3, 1).
8534 * @param ppvMem Where to return the pointer to the stack memory.
8535 * @param puNewRsp Where to return the new RSP value. This must be
8536 * assigned to CPUMCTX::rsp manually some time
8537 * after iemMemStackPopDoneSpecial() has been
8538 * called.
8539 */
8540VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8541 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8542{
8543 Assert(cbMem < UINT8_MAX);
8544 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8545 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8546}
8547
8548
8549/**
8550 * Continue a special stack pop (used by iret and retf), for the purpose of
8551 * retrieving a new stack pointer.
8552 *
8553 * This will raise \#SS or \#PF if appropriate.
8554 *
8555 * @returns Strict VBox status code.
8556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8557 * @param off Offset from the top of the stack. This is zero
8558 * except in the retf case.
8559 * @param cbMem The number of bytes to pop from the stack.
8560 * @param ppvMem Where to return the pointer to the stack memory.
8561 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8562 * return this because all use of this function is
8563 * to retrieve a new value and anything we return
8564 * here would be discarded.)
8565 */
8566VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8567 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
8568{
8569 Assert(cbMem < UINT8_MAX);
8570
8571 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8572 RTGCPTR GCPtrTop;
8573 if (IEM_IS_64BIT_CODE(pVCpu))
8574 GCPtrTop = uCurNewRsp;
8575 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8576 GCPtrTop = (uint32_t)uCurNewRsp;
8577 else
8578 GCPtrTop = (uint16_t)uCurNewRsp;
8579
8580 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8581 0 /* checked in iemMemStackPopBeginSpecial */);
8582}
8583
8584
8585/**
8586 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8587 * iemMemStackPopContinueSpecial).
8588 *
8589 * The caller will manually commit the rSP.
8590 *
8591 * @returns Strict VBox status code.
8592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8593 * @param pvMem The pointer returned by
8594 * iemMemStackPopBeginSpecial() or
8595 * iemMemStackPopContinueSpecial().
8596 */
8597VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8598{
8599 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8600}
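/*
 * Illustrative usage sketch (hypothetical caller, not taken from this file):
 * the special helpers split mapping and committing so that e.g. exception
 * dispatch can build a whole frame before RSP changes.  The 12-byte,
 * dword-aligned frame layout below (EFLAGS, CS, EIP) and the uOld* locals
 * are assumptions for illustration.
 *
 *     uint64_t  uNewRsp;
 *     uint32_t *pu32Frame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 12, 3, (void **)&pu32Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     pu32Frame[2] = uOldEfl;
 *     pu32Frame[1] = uOldCs;
 *     pu32Frame[0] = uOldEip;
 *     rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu32Frame, uNewRsp);  // commits RSP on success
 */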
8601
8602
8603/**
8604 * Fetches a system table byte.
8605 *
8606 * @returns Strict VBox status code.
8607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8608 * @param pbDst Where to return the byte.
8609 * @param iSegReg The index of the segment register to use for
8610 * this access. The base and limits are checked.
8611 * @param GCPtrMem The address of the guest memory.
8612 */
8613VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8614{
8615 /* The lazy approach for now... */
8616 uint8_t const *pbSrc;
8617 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8618 if (rc == VINF_SUCCESS)
8619 {
8620 *pbDst = *pbSrc;
8621 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8622 }
8623 return rc;
8624}
8625
8626
8627/**
8628 * Fetches a system table word.
8629 *
8630 * @returns Strict VBox status code.
8631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8632 * @param pu16Dst Where to return the word.
8633 * @param iSegReg The index of the segment register to use for
8634 * this access. The base and limits are checked.
8635 * @param GCPtrMem The address of the guest memory.
8636 */
8637VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8638{
8639 /* The lazy approach for now... */
8640 uint16_t const *pu16Src;
8641 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8642 if (rc == VINF_SUCCESS)
8643 {
8644 *pu16Dst = *pu16Src;
8645 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8646 }
8647 return rc;
8648}
8649
8650
8651/**
8652 * Fetches a system table dword.
8653 *
8654 * @returns Strict VBox status code.
8655 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8656 * @param pu32Dst Where to return the dword.
8657 * @param iSegReg The index of the segment register to use for
8658 * this access. The base and limits are checked.
8659 * @param GCPtrMem The address of the guest memory.
8660 */
8661VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8662{
8663 /* The lazy approach for now... */
8664 uint32_t const *pu32Src;
8665 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8666 if (rc == VINF_SUCCESS)
8667 {
8668 *pu32Dst = *pu32Src;
8669 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8670 }
8671 return rc;
8672}
8673
8674
8675/**
8676 * Fetches a system table qword.
8677 *
8678 * @returns Strict VBox status code.
8679 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8680 * @param pu64Dst Where to return the qword.
8681 * @param iSegReg The index of the segment register to use for
8682 * this access. The base and limits are checked.
8683 * @param GCPtrMem The address of the guest memory.
8684 */
8685VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8686{
8687 /* The lazy approach for now... */
8688 uint64_t const *pu64Src;
8689 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8690 if (rc == VINF_SUCCESS)
8691 {
8692 *pu64Dst = *pu64Src;
8693 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8694 }
8695 return rc;
8696}
8697
8698
8699/**
8700 * Fetches a descriptor table entry with caller specified error code.
8701 *
8702 * @returns Strict VBox status code.
8703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8704 * @param pDesc Where to return the descriptor table entry.
8705 * @param uSel The selector which table entry to fetch.
8706 * @param uXcpt The exception to raise on table lookup error.
8707 * @param uErrorCode The error code associated with the exception.
8708 */
8709static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8710 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8711{
8712 AssertPtr(pDesc);
8713 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8714
8715 /** @todo did the 286 require all 8 bytes to be accessible? */
8716 /*
8717 * Get the selector table base and check bounds.
8718 */
8719 RTGCPTR GCPtrBase;
8720 if (uSel & X86_SEL_LDT)
8721 {
8722 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8723 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8724 {
8725 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8726 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8727 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8728 uErrorCode, 0);
8729 }
8730
8731 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8732 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8733 }
8734 else
8735 {
8736 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8737 {
8738 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8739 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8740 uErrorCode, 0);
8741 }
8742 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8743 }
8744
8745 /*
8746 * Read the legacy descriptor and maybe the long mode extensions if
8747 * required.
8748 */
8749 VBOXSTRICTRC rcStrict;
8750 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8751 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8752 else
8753 {
8754 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8755 if (rcStrict == VINF_SUCCESS)
8756 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8757 if (rcStrict == VINF_SUCCESS)
8758 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8759 if (rcStrict == VINF_SUCCESS)
8760 pDesc->Legacy.au16[3] = 0;
8761 else
8762 return rcStrict;
8763 }
8764
8765 if (rcStrict == VINF_SUCCESS)
8766 {
8767 if ( !IEM_IS_LONG_MODE(pVCpu)
8768 || pDesc->Legacy.Gen.u1DescType)
8769 pDesc->Long.au64[1] = 0;
8770 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8771 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8772 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8773 else
8774 {
8775 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8776 /** @todo is this the right exception? */
8777 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8778 }
8779 }
8780 return rcStrict;
8781}
8782
8783
8784/**
8785 * Fetches a descriptor table entry.
8786 *
8787 * @returns Strict VBox status code.
8788 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8789 * @param pDesc Where to return the descriptor table entry.
8790 * @param uSel The selector which table entry to fetch.
8791 * @param uXcpt The exception to raise on table lookup error.
8792 */
8793VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8794{
8795 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8796}
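/*
 * Illustrative usage sketch (hypothetical caller, not taken from this file):
 * far-transfer emulation would fetch the descriptor for the new selector and
 * let a lookup failure surface as #GP via the uXcpt parameter.  uNewCs is an
 * assumed local.
 *
 *     IEMSELDESC DescCs;
 *     VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &DescCs, uNewCs, X86_XCPT_GP);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */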
8797
8798
8799/**
8800 * Marks the selector descriptor as accessed (only non-system descriptors).
8801 *
8802 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8803 * will therefore skip the limit checks.
8804 *
8805 * @returns Strict VBox status code.
8806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8807 * @param uSel The selector.
8808 */
8809VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8810{
8811 /*
8812 * Get the selector table base and calculate the entry address.
8813 */
8814 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8815 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8816 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8817 GCPtr += uSel & X86_SEL_MASK;
8818
8819 /*
8820 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8821 * ugly stuff to avoid this. This will make sure it's an atomic access
8822 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8823 */
8824 VBOXSTRICTRC rcStrict;
8825 uint32_t volatile *pu32;
8826 if ((GCPtr & 3) == 0)
8827 {
8828 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
8829 GCPtr += 2 + 2;
8830 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8831 if (rcStrict != VINF_SUCCESS)
8832 return rcStrict;
8833 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8834 }
8835 else
8836 {
8837 /* The misaligned GDT/LDT case, map the whole thing. */
8838 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8839 if (rcStrict != VINF_SUCCESS)
8840 return rcStrict;
8841 switch ((uintptr_t)pu32 & 3)
8842 {
8843 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8844 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8845 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8846 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8847 }
8848 }
8849
8850 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8851}
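/*
 * Worked note restating the bit arithmetic above: the accessed bit is bit 40
 * of the 8-byte descriptor, i.e. bit 0 of byte 5.  In the aligned case the
 * mapping starts at descriptor byte 4 (GCPtr += 2 + 2), so the bit is set as
 * bit 8 of that dword.  In the misaligned case the host pointer is offset so
 * the target is dword aligned again, e.g. for ((uintptr_t)pu32 & 3) == 2 the
 * code sets bit 40 - 16 = 24 of the bytes starting at pu32 + 2.
 */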
8852
8853/** @} */
8854
8855/** @name Opcode Helpers.
8856 * @{
8857 */
8858
8859/**
8860 * Calculates the effective address of a ModR/M memory operand.
8861 *
8862 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8863 *
8864 * @return Strict VBox status code.
8865 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8866 * @param bRm The ModRM byte.
8867 * @param cbImmAndRspOffset - First byte: The size of any immediate
8868 * following the effective address opcode bytes
8869 * (only for RIP relative addressing).
8870 * - Second byte: RSP displacement (for POP [ESP]).
8871 * @param pGCPtrEff Where to return the effective address.
8872 */
8873VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8874{
8875 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8876# define SET_SS_DEF() \
8877 do \
8878 { \
8879 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8880 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8881 } while (0)
8882
8883 if (!IEM_IS_64BIT_CODE(pVCpu))
8884 {
8885/** @todo Check the effective address size crap! */
8886 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8887 {
8888 uint16_t u16EffAddr;
8889
8890 /* Handle the disp16 form with no registers first. */
8891 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8892 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8893 else
8894 {
8895 /* Get the displacement. */
8896 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8897 {
8898 case 0: u16EffAddr = 0; break;
8899 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8900 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8901 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8902 }
8903
8904 /* Add the base and index registers to the disp. */
8905 switch (bRm & X86_MODRM_RM_MASK)
8906 {
8907 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8908 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8909 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8910 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8911 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8912 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8913 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8914 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8915 }
8916 }
8917
8918 *pGCPtrEff = u16EffAddr;
8919 }
8920 else
8921 {
8922 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8923 uint32_t u32EffAddr;
8924
8925 /* Handle the disp32 form with no registers first. */
8926 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8927 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8928 else
8929 {
8930 /* Get the register (or SIB) value. */
8931 switch ((bRm & X86_MODRM_RM_MASK))
8932 {
8933 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8934 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8935 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8936 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8937 case 4: /* SIB */
8938 {
8939 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8940
8941 /* Get the index and scale it. */
8942 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8943 {
8944 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8945 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8946 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8947 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8948 case 4: u32EffAddr = 0; /*none */ break;
8949 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8950 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8951 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8952 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8953 }
8954 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8955
8956 /* add base */
8957 switch (bSib & X86_SIB_BASE_MASK)
8958 {
8959 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8960 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8961 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8962 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8963 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8964 case 5:
8965 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8966 {
8967 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8968 SET_SS_DEF();
8969 }
8970 else
8971 {
8972 uint32_t u32Disp;
8973 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8974 u32EffAddr += u32Disp;
8975 }
8976 break;
8977 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8978 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8979 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8980 }
8981 break;
8982 }
8983 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8984 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8985 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8986 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8987 }
8988
8989 /* Get and add the displacement. */
8990 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8991 {
8992 case 0:
8993 break;
8994 case 1:
8995 {
8996 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8997 u32EffAddr += i8Disp;
8998 break;
8999 }
9000 case 2:
9001 {
9002 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9003 u32EffAddr += u32Disp;
9004 break;
9005 }
9006 default:
9007 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9008 }
9009
9010 }
9011 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9012 *pGCPtrEff = u32EffAddr;
9013 else
9014 {
9015 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9016 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9017 }
9018 }
9019 }
9020 else
9021 {
9022 uint64_t u64EffAddr;
9023
9024 /* Handle the rip+disp32 form with no registers first. */
9025 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9026 {
9027 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9028 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9029 }
9030 else
9031 {
9032 /* Get the register (or SIB) value. */
9033 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9034 {
9035 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9036 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9037 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9038 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9039 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9040 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9041 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9042 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9043 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9044 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9045 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9046 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9047 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9048 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9049 /* SIB */
9050 case 4:
9051 case 12:
9052 {
9053 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9054
9055 /* Get the index and scale it. */
9056 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9057 {
9058 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9059 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9060 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9061 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9062 case 4: u64EffAddr = 0; /*none */ break;
9063 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9064 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9065 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9066 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9067 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9068 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9069 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9070 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9071 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9072 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9073 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9074 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9075 }
9076 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9077
9078 /* add base */
9079 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9080 {
9081 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9082 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9083 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9084 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9085 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9086 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9087 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9088 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9089 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9090 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9091 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9092 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9093 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9094 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9095 /* complicated encodings */
9096 case 5:
9097 case 13:
9098 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9099 {
9100 if (!pVCpu->iem.s.uRexB)
9101 {
9102 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9103 SET_SS_DEF();
9104 }
9105 else
9106 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9107 }
9108 else
9109 {
9110 uint32_t u32Disp;
9111 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9112 u64EffAddr += (int32_t)u32Disp;
9113 }
9114 break;
9115 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9116 }
9117 break;
9118 }
9119 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9120 }
9121
9122 /* Get and add the displacement. */
9123 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9124 {
9125 case 0:
9126 break;
9127 case 1:
9128 {
9129 int8_t i8Disp;
9130 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9131 u64EffAddr += i8Disp;
9132 break;
9133 }
9134 case 2:
9135 {
9136 uint32_t u32Disp;
9137 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9138 u64EffAddr += (int32_t)u32Disp;
9139 break;
9140 }
9141 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9142 }
9143
9144 }
9145
9146 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9147 *pGCPtrEff = u64EffAddr;
9148 else
9149 {
9150 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9151 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9152 }
9153 }
9154
9155 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9156 return VINF_SUCCESS;
9157}
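/*
 * Usage sketch (illustrative only, not part of the original sources): how a
 * caller might pack the cbImmAndRspOffset argument documented above.  The
 * concrete values (a 4 byte immediate, an 8 byte stack pointer bias) and the
 * surrounding decoder context (pVCpu, bRm) are assumed for the example; only
 * the first-byte / second-byte split matters.
 *
 *     uint32_t const cbImm             = 4;                    // imm32 follows the effective address bytes
 *     uint32_t const offRspBias        = 8;                    // RSP displacement, e.g. for a POP [xSP] style case
 *     uint32_t const cbImmAndRspOffset = cbImm | (offRspBias << 8);
 *     RTGCPTR        GCPtrEff;
 *     VBOXSTRICTRC   rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, cbImmAndRspOffset, &GCPtrEff);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */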
9158
9159
9160#ifdef IEM_WITH_SETJMP
9161/**
9162 * Calculates the effective address of a ModR/M memory operand.
9163 *
9164 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9165 *
9166 * May longjmp on internal error.
9167 *
9168 * @return The effective address.
9169 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9170 * @param bRm The ModRM byte.
9171 * @param cbImmAndRspOffset - First byte: The size of any immediate
9172 * following the effective address opcode bytes
9173 * (only for RIP relative addressing).
9174 * - Second byte: RSP displacement (for POP [ESP]).
9175 */
9176RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
9177{
9178 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9179# define SET_SS_DEF() \
9180 do \
9181 { \
9182 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9183 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9184 } while (0)
9185
9186 if (!IEM_IS_64BIT_CODE(pVCpu))
9187 {
9188/** @todo Check the effective address size crap! */
9189 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9190 {
9191 uint16_t u16EffAddr;
9192
9193 /* Handle the disp16 form with no registers first. */
9194 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9195 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9196 else
9197 {
9198 /* Get the displacement. */
9199 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9200 {
9201 case 0: u16EffAddr = 0; break;
9202 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9203 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9204 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
9205 }
9206
9207 /* Add the base and index registers to the disp. */
9208 switch (bRm & X86_MODRM_RM_MASK)
9209 {
9210 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9211 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9212 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9213 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9214 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9215 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9216 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9217 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9218 }
9219 }
9220
9221 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9222 return u16EffAddr;
9223 }
9224
9225 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9226 uint32_t u32EffAddr;
9227
9228 /* Handle the disp32 form with no registers first. */
9229 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9230 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9231 else
9232 {
9233 /* Get the register (or SIB) value. */
9234 switch ((bRm & X86_MODRM_RM_MASK))
9235 {
9236 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9237 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9238 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9239 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9240 case 4: /* SIB */
9241 {
9242 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9243
9244 /* Get the index and scale it. */
9245 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9246 {
9247 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9248 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9249 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9250 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9251 case 4: u32EffAddr = 0; /*none */ break;
9252 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9253 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9254 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9255 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9256 }
9257 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9258
9259 /* add base */
9260 switch (bSib & X86_SIB_BASE_MASK)
9261 {
9262 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9263 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9264 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9265 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9266 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9267 case 5:
9268 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9269 {
9270 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9271 SET_SS_DEF();
9272 }
9273 else
9274 {
9275 uint32_t u32Disp;
9276 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9277 u32EffAddr += u32Disp;
9278 }
9279 break;
9280 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9281 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9282 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9283 }
9284 break;
9285 }
9286 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9287 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9288 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9289 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9290 }
9291
9292 /* Get and add the displacement. */
9293 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9294 {
9295 case 0:
9296 break;
9297 case 1:
9298 {
9299 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9300 u32EffAddr += i8Disp;
9301 break;
9302 }
9303 case 2:
9304 {
9305 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9306 u32EffAddr += u32Disp;
9307 break;
9308 }
9309 default:
9310 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
9311 }
9312 }
9313
9314 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9315 {
9316 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9317 return u32EffAddr;
9318 }
9319 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9320 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9321 return u32EffAddr & UINT16_MAX;
9322 }
9323
9324 uint64_t u64EffAddr;
9325
9326 /* Handle the rip+disp32 form with no registers first. */
9327 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9328 {
9329 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9330 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9331 }
9332 else
9333 {
9334 /* Get the register (or SIB) value. */
9335 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9336 {
9337 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9338 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9339 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9340 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9341 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9342 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9343 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9344 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9345 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9346 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9347 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9348 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9349 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9350 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9351 /* SIB */
9352 case 4:
9353 case 12:
9354 {
9355 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9356
9357 /* Get the index and scale it. */
9358 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9359 {
9360 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9361 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9362 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9363 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9364 case 4: u64EffAddr = 0; /*none */ break;
9365 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9366 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9367 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9368 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9369 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9370 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9371 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9372 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9373 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9374 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9375 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9376 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9377 }
9378 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9379
9380 /* add base */
9381 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9382 {
9383 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9384 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9385 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9386 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9387 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9388 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9389 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9390 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9391 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9392 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9393 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9394 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9395 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9396 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9397 /* complicated encodings */
9398 case 5:
9399 case 13:
9400 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9401 {
9402 if (!pVCpu->iem.s.uRexB)
9403 {
9404 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9405 SET_SS_DEF();
9406 }
9407 else
9408 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9409 }
9410 else
9411 {
9412 uint32_t u32Disp;
9413 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9414 u64EffAddr += (int32_t)u32Disp;
9415 }
9416 break;
9417 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9418 }
9419 break;
9420 }
9421 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9422 }
9423
9424 /* Get and add the displacement. */
9425 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9426 {
9427 case 0:
9428 break;
9429 case 1:
9430 {
9431 int8_t i8Disp;
9432 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9433 u64EffAddr += i8Disp;
9434 break;
9435 }
9436 case 2:
9437 {
9438 uint32_t u32Disp;
9439 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9440 u64EffAddr += (int32_t)u32Disp;
9441 break;
9442 }
9443 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9444 }
9445
9446 }
9447
9448 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9449 {
9450 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9451 return u64EffAddr;
9452 }
9453 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9454 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9455 return u64EffAddr & UINT32_MAX;
9456}
9457#endif /* IEM_WITH_SETJMP */
9458
9459
9460/**
9461 * Calculates the effective address of a ModR/M memory operand, extended version
9462 * for use in the recompilers.
9463 *
9464 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9465 *
9466 * @return Strict VBox status code.
9467 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9468 * @param bRm The ModRM byte.
9469 * @param cbImmAndRspOffset - First byte: The size of any immediate
9470 * following the effective address opcode bytes
9471 * (only for RIP relative addressing).
9472 * - Second byte: RSP displacement (for POP [ESP]).
9473 * @param pGCPtrEff Where to return the effective address.
9474 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
9475 * SIB byte (bits 39:32).
9476 */
9477VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
9478{
9479 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
9480# define SET_SS_DEF() \
9481 do \
9482 { \
9483 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9484 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9485 } while (0)
9486
9487 uint64_t uInfo;
9488 if (!IEM_IS_64BIT_CODE(pVCpu))
9489 {
9490/** @todo Check the effective address size crap! */
9491 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9492 {
9493 uint16_t u16EffAddr;
9494
9495 /* Handle the disp16 form with no registers first. */
9496 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9497 {
9498 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9499 uInfo = u16EffAddr;
9500 }
9501 else
9502 {
9503 /* Get the displacement. */
9504 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9505 {
9506 case 0: u16EffAddr = 0; break;
9507 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9508 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9509 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9510 }
9511 uInfo = u16EffAddr;
9512
9513 /* Add the base and index registers to the disp. */
9514 switch (bRm & X86_MODRM_RM_MASK)
9515 {
9516 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9517 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9518 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9519 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9520 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9521 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9522 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9523 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9524 }
9525 }
9526
9527 *pGCPtrEff = u16EffAddr;
9528 }
9529 else
9530 {
9531 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9532 uint32_t u32EffAddr;
9533
9534 /* Handle the disp32 form with no registers first. */
9535 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9536 {
9537 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9538 uInfo = u32EffAddr;
9539 }
9540 else
9541 {
9542 /* Get the register (or SIB) value. */
9543 uInfo = 0;
9544 switch ((bRm & X86_MODRM_RM_MASK))
9545 {
9546 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9547 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9548 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9549 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9550 case 4: /* SIB */
9551 {
9552 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9553 uInfo = (uint64_t)bSib << 32;
9554
9555 /* Get the index and scale it. */
9556 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9557 {
9558 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9559 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9560 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9561 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9562 case 4: u32EffAddr = 0; /*none */ break;
9563 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9564 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9565 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9566 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9567 }
9568 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9569
9570 /* add base */
9571 switch (bSib & X86_SIB_BASE_MASK)
9572 {
9573 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9574 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9575 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9576 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9577 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9578 case 5:
9579 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9580 {
9581 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9582 SET_SS_DEF();
9583 }
9584 else
9585 {
9586 uint32_t u32Disp;
9587 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9588 u32EffAddr += u32Disp;
9589 uInfo |= u32Disp;
9590 }
9591 break;
9592 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9593 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9594 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9595 }
9596 break;
9597 }
9598 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9599 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9600 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9601 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9602 }
9603
9604 /* Get and add the displacement. */
9605 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9606 {
9607 case 0:
9608 break;
9609 case 1:
9610 {
9611 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9612 u32EffAddr += i8Disp;
9613 uInfo |= (uint32_t)(int32_t)i8Disp;
9614 break;
9615 }
9616 case 2:
9617 {
9618 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9619 u32EffAddr += u32Disp;
9620 uInfo |= (uint32_t)u32Disp;
9621 break;
9622 }
9623 default:
9624 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9625 }
9626
9627 }
9628 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9629 *pGCPtrEff = u32EffAddr;
9630 else
9631 {
9632 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9633 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9634 }
9635 }
9636 }
9637 else
9638 {
9639 uint64_t u64EffAddr;
9640
9641 /* Handle the rip+disp32 form with no registers first. */
9642 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9643 {
9644 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9645 uInfo = (uint32_t)u64EffAddr;
9646 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9647 }
9648 else
9649 {
9650 /* Get the register (or SIB) value. */
9651 uInfo = 0;
9652 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9653 {
9654 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9655 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9656 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9657 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9658 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9659 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9660 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9661 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9662 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9663 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9664 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9665 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9666 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9667 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9668 /* SIB */
9669 case 4:
9670 case 12:
9671 {
9672 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9673 uInfo = (uint64_t)bSib << 32;
9674
9675 /* Get the index and scale it. */
9676 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9677 {
9678 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9679 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9680 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9681 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9682 case 4: u64EffAddr = 0; /*none */ break;
9683 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9684 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9685 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9686 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9687 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9688 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9689 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9690 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9691 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9692 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9693 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9694 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9695 }
9696 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9697
9698 /* add base */
9699 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9700 {
9701 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9702 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9703 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9704 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9705 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9706 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9707 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9708 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9709 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9710 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9711 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9712 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9713 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9714 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9715 /* complicated encodings */
9716 case 5:
9717 case 13:
9718 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9719 {
9720 if (!pVCpu->iem.s.uRexB)
9721 {
9722 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9723 SET_SS_DEF();
9724 }
9725 else
9726 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9727 }
9728 else
9729 {
9730 uint32_t u32Disp;
9731 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9732 u64EffAddr += (int32_t)u32Disp;
9733 uInfo |= u32Disp;
9734 }
9735 break;
9736 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9737 }
9738 break;
9739 }
9740 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9741 }
9742
9743 /* Get and add the displacement. */
9744 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9745 {
9746 case 0:
9747 break;
9748 case 1:
9749 {
9750 int8_t i8Disp;
9751 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9752 u64EffAddr += i8Disp;
9753 uInfo |= (uint32_t)(int32_t)i8Disp;
9754 break;
9755 }
9756 case 2:
9757 {
9758 uint32_t u32Disp;
9759 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9760 u64EffAddr += (int32_t)u32Disp;
9761 uInfo |= u32Disp;
9762 break;
9763 }
9764 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9765 }
9766
9767 }
9768
9769 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9770 *pGCPtrEff = u64EffAddr;
9771 else
9772 {
9773 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9774 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9775 }
9776 }
9777 *puInfo = uInfo;
9778
9779 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9780 return VINF_SUCCESS;
9781}
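/*
 * Usage sketch (illustrative only, not part of the original sources): how a
 * recompiler-style caller might unpack the extra info returned above.  Per
 * the doxygen comment, bits 31:0 hold the 32-bit displacement and bits
 * 39:32 the SIB byte (zero when no SIB byte was decoded).  The decoder
 * context (pVCpu, bRm) is assumed for the example.
 *
 *     RTGCPTR      GCPtrEff;
 *     uint64_t     uInfo;
 *     VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, 0, &GCPtrEff, &uInfo); // cbImmAndRspOffset = 0
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         uint32_t const u32Disp = (uint32_t)uInfo;          // bits 31:0
 *         uint8_t  const bSib    = (uint8_t)(uInfo >> 32);   // bits 39:32
 *         ...
 *     }
 */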
9782
9783/** @} */
9784
9785
9786#ifdef LOG_ENABLED
9787/**
9788 * Logs the current instruction.
9789 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9790 * @param fSameCtx Set if we have the same context information as the VMM,
9791 * clear if we may have already executed an instruction in
9792 * our debug context. When clear, we assume IEMCPU holds
9793 * valid CPU mode info.
9794 *
9795 * The @a fSameCtx parameter is now misleading and obsolete.
9796 * @param pszFunction The IEM function doing the execution.
9797 */
9798static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9799{
9800# ifdef IN_RING3
9801 if (LogIs2Enabled())
9802 {
9803 char szInstr[256];
9804 uint32_t cbInstr = 0;
9805 if (fSameCtx)
9806 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9807 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9808 szInstr, sizeof(szInstr), &cbInstr);
9809 else
9810 {
9811 uint32_t fFlags = 0;
9812 switch (IEM_GET_CPU_MODE(pVCpu))
9813 {
9814 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9815 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9816 case IEMMODE_16BIT:
9817 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9818 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9819 else
9820 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9821 break;
9822 }
9823 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9824 szInstr, sizeof(szInstr), &cbInstr);
9825 }
9826
9827 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9828 Log2(("**** %s fExec=%x\n"
9829 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9830 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9831 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9832 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9833 " %s\n"
9834 , pszFunction, pVCpu->iem.s.fExec,
9835 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9836 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9837 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9838 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9839 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9840 szInstr));
9841
9842 if (LogIs3Enabled())
9843 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9844 }
9845 else
9846# endif
9847 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9848 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9849 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9850}
9851#endif /* LOG_ENABLED */
9852
9853
9854#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9855/**
9856 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9857 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9858 *
9859 * @returns Modified rcStrict.
9860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9861 * @param rcStrict The instruction execution status.
9862 */
9863static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9864{
9865 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9866 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9867 {
9868 /* VMX preemption timer takes priority over NMI-window exits. */
9869 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9870 {
9871 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9872 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9873 }
9874 /*
9875 * Check remaining intercepts.
9876 *
9877 * NMI-window and Interrupt-window VM-exits.
9878 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9879 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9880 *
9881 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9882 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9883 */
9884 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9885 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9886 && !TRPMHasTrap(pVCpu))
9887 {
9888 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9889 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9890 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9891 {
9892 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9893 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9894 }
9895 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9896 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9897 {
9898 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9899 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9900 }
9901 }
9902 }
9903 /* TPR-below threshold/APIC write has the highest priority. */
9904 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9905 {
9906 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9907 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9908 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9909 }
9910 /* MTF takes priority over VMX-preemption timer. */
9911 else
9912 {
9913 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9914 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9915 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9916 }
9917 return rcStrict;
9918}
9919#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9920
9921
9922/**
9923 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9924 * IEMExecOneWithPrefetchedByPC.
9925 *
9926 * Similar code is found in IEMExecLots.
9927 *
9928 * @return Strict VBox status code.
9929 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9930 * @param fExecuteInhibit If set, execute the instruction following CLI,
9931 * POP SS and MOV SS,GR.
9932 * @param pszFunction The calling function name.
9933 */
9934DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9935{
9936 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9937 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9938 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9939 RT_NOREF_PV(pszFunction);
9940
9941#ifdef IEM_WITH_SETJMP
9942 VBOXSTRICTRC rcStrict;
9943 IEM_TRY_SETJMP(pVCpu, rcStrict)
9944 {
9945 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9946 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9947 }
9948 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9949 {
9950 pVCpu->iem.s.cLongJumps++;
9951 }
9952 IEM_CATCH_LONGJMP_END(pVCpu);
9953#else
9954 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9955 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9956#endif
9957 if (rcStrict == VINF_SUCCESS)
9958 pVCpu->iem.s.cInstructions++;
9959 if (pVCpu->iem.s.cActiveMappings > 0)
9960 {
9961 Assert(rcStrict != VINF_SUCCESS);
9962 iemMemRollback(pVCpu);
9963 }
9964 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9965 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9966 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9967
9968//#ifdef DEBUG
9969// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9970//#endif
9971
9972#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9973 /*
9974 * Perform any VMX nested-guest instruction boundary actions.
9975 *
9976 * If any of these causes a VM-exit, we must skip executing the next
9977 * instruction (would run into stale page tables). A VM-exit makes sure
9978 * there is no interrupt-inhibition, so that should ensure we don't go on
9979 * to try executing the next instruction. Clearing fExecuteInhibit is
9980 * problematic because of the setjmp/longjmp clobbering above.
9981 */
9982 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9983 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9984 || rcStrict != VINF_SUCCESS)
9985 { /* likely */ }
9986 else
9987 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9988#endif
9989
9990 /* Execute the next instruction as well if a cli, pop ss or
9991 mov ss, Gr has just completed successfully. */
9992 if ( fExecuteInhibit
9993 && rcStrict == VINF_SUCCESS
9994 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9995 {
9996 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9997 if (rcStrict == VINF_SUCCESS)
9998 {
9999#ifdef LOG_ENABLED
10000 iemLogCurInstr(pVCpu, false, pszFunction);
10001#endif
10002#ifdef IEM_WITH_SETJMP
10003 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
10004 {
10005 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10006 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10007 }
10008 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10009 {
10010 pVCpu->iem.s.cLongJumps++;
10011 }
10012 IEM_CATCH_LONGJMP_END(pVCpu);
10013#else
10014 IEM_OPCODE_GET_FIRST_U8(&b);
10015 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10016#endif
10017 if (rcStrict == VINF_SUCCESS)
10018 {
10019 pVCpu->iem.s.cInstructions++;
10020#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10021 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10022 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
10023 { /* likely */ }
10024 else
10025 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10026#endif
10027 }
10028 if (pVCpu->iem.s.cActiveMappings > 0)
10029 {
10030 Assert(rcStrict != VINF_SUCCESS);
10031 iemMemRollback(pVCpu);
10032 }
10033 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
10034 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
10035 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
10036 }
10037 else if (pVCpu->iem.s.cActiveMappings > 0)
10038 iemMemRollback(pVCpu);
10039 /** @todo drop this after we bake this change into RIP advancing. */
10040 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
10041 }
10042
10043 /*
10044 * Return value fiddling, statistics and sanity assertions.
10045 */
10046 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10047
10048 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10049 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10050 return rcStrict;
10051}
10052
10053
10054/**
10055 * Execute one instruction.
10056 *
10057 * @return Strict VBox status code.
10058 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10059 */
10060VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
10061{
10062 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
10063#ifdef LOG_ENABLED
10064 iemLogCurInstr(pVCpu, true, "IEMExecOne");
10065#endif
10066
10067 /*
10068 * Do the decoding and emulation.
10069 */
10070 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10071 if (rcStrict == VINF_SUCCESS)
10072 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
10073 else if (pVCpu->iem.s.cActiveMappings > 0)
10074 iemMemRollback(pVCpu);
10075
10076 if (rcStrict != VINF_SUCCESS)
10077 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10078 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10079 return rcStrict;
10080}
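/*
 * Usage sketch (illustrative only, not part of the original sources): a
 * minimal caller, e.g. from an EM-style execution loop.  Informational
 * statuses (VINF_EM_*) are passed back up rather than treated as errors.
 *
 *     VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */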
10081
10082
10083VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10084{
10085 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10086 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10087 if (rcStrict == VINF_SUCCESS)
10088 {
10089 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
10090 if (pcbWritten)
10091 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10092 }
10093 else if (pVCpu->iem.s.cActiveMappings > 0)
10094 iemMemRollback(pVCpu);
10095
10096 return rcStrict;
10097}
10098
10099
10100VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10101 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10102{
10103 VBOXSTRICTRC rcStrict;
10104 if ( cbOpcodeBytes
10105 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10106 {
10107 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
10108#ifdef IEM_WITH_CODE_TLB
10109 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10110 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10111 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10112 pVCpu->iem.s.offCurInstrStart = 0;
10113 pVCpu->iem.s.offInstrNextByte = 0;
10114 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
10115#else
10116 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10117 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10118#endif
10119 rcStrict = VINF_SUCCESS;
10120 }
10121 else
10122 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10123 if (rcStrict == VINF_SUCCESS)
10124 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
10125 else if (pVCpu->iem.s.cActiveMappings > 0)
10126 iemMemRollback(pVCpu);
10127
10128 return rcStrict;
10129}
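/*
 * Usage sketch (illustrative only, not part of the original sources): a
 * caller that already holds the opcode bytes for the current RIP (abInstr
 * and cbInstr are assumed to come from the caller, e.g. an instruction
 * buffer supplied by a VM-exit) and wants IEM to reuse them instead of
 * fetching them again:
 *
 *     VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip,
 *                                                          abInstr, cbInstr);
 */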
10130
10131
10132VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10133{
10134 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10135 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
10136 if (rcStrict == VINF_SUCCESS)
10137 {
10138 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
10139 if (pcbWritten)
10140 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10141 }
10142 else if (pVCpu->iem.s.cActiveMappings > 0)
10143 iemMemRollback(pVCpu);
10144
10145 return rcStrict;
10146}
10147
10148
10149VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10150 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10151{
10152 VBOXSTRICTRC rcStrict;
10153 if ( cbOpcodeBytes
10154 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10155 {
10156 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
10157#ifdef IEM_WITH_CODE_TLB
10158 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10159 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10160 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10161 pVCpu->iem.s.offCurInstrStart = 0;
10162 pVCpu->iem.s.offInstrNextByte = 0;
10163 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
10164#else
10165 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10166 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10167#endif
10168 rcStrict = VINF_SUCCESS;
10169 }
10170 else
10171 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
10172 if (rcStrict == VINF_SUCCESS)
10173 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
10174 else if (pVCpu->iem.s.cActiveMappings > 0)
10175 iemMemRollback(pVCpu);
10176
10177 return rcStrict;
10178}
10179
10180
10181/**
10182 * For handling split cacheline lock operations when the host has split-lock
10183 * detection enabled.
10184 *
10185 * This will cause the interpreter to disregard the lock prefix and implicit
10186 * locking (xchg).
10187 *
10188 * @returns Strict VBox status code.
10189 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10190 */
10191VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
10192{
10193 /*
10194 * Do the decoding and emulation.
10195 */
10196 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
10197 if (rcStrict == VINF_SUCCESS)
10198 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
10199 else if (pVCpu->iem.s.cActiveMappings > 0)
10200 iemMemRollback(pVCpu);
10201
10202 if (rcStrict != VINF_SUCCESS)
10203 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10204 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10205 return rcStrict;
10206}
10207
10208
10209/**
10210 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
10211 * inject a pending TRPM trap.
10212 */
10213VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
10214{
10215 Assert(TRPMHasTrap(pVCpu));
10216
10217 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
10218 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
10219 {
10220 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
10221#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10222 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
10223 if (fIntrEnabled)
10224 {
10225 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
10226 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10227 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
10228 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
10229 else
10230 {
10231 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
10232 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
10233 }
10234 }
10235#else
10236 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10237#endif
10238 if (fIntrEnabled)
10239 {
10240 uint8_t u8TrapNo;
10241 TRPMEVENT enmType;
10242 uint32_t uErrCode;
10243 RTGCPTR uCr2;
10244 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
10245 AssertRC(rc2);
10246 Assert(enmType == TRPM_HARDWARE_INT);
10247 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
10248
10249 TRPMResetTrap(pVCpu);
10250
10251#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10252 /* Injecting an event may cause a VM-exit. */
10253 if ( rcStrict != VINF_SUCCESS
10254 && rcStrict != VINF_IEM_RAISED_XCPT)
10255 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
10256#else
10257 NOREF(rcStrict);
10258#endif
10259 }
10260 }
10261
10262 return VINF_SUCCESS;
10263}
10264
10265
10266VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
10267{
10268 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
10269 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
10270 Assert(cMaxInstructions > 0);
10271
10272 /*
10273 * See if there is an interrupt pending in TRPM, inject it if we can.
10274 */
10275 /** @todo What if we are injecting an exception and not an interrupt? Is that
10276 * possible here? For now we assert it is indeed only an interrupt. */
10277 if (!TRPMHasTrap(pVCpu))
10278 { /* likely */ }
10279 else
10280 {
10281 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
10282 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10283 { /*likely */ }
10284 else
10285 return rcStrict;
10286 }
10287
10288 /*
10289 * Initial decoder init w/ prefetch, then setup setjmp.
10290 */
10291 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10292 if (rcStrict == VINF_SUCCESS)
10293 {
10294#ifdef IEM_WITH_SETJMP
10295 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
10296 IEM_TRY_SETJMP(pVCpu, rcStrict)
10297#endif
10298 {
10299 /*
10300 * The run loop. We limit ourselves to the instruction count specified by the caller (cMaxInstructions).
10301 */
10302 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
10303 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10304 for (;;)
10305 {
10306 /*
10307 * Log the state.
10308 */
10309#ifdef LOG_ENABLED
10310 iemLogCurInstr(pVCpu, true, "IEMExecLots");
10311#endif
10312
10313 /*
10314 * Do the decoding and emulation.
10315 */
10316 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10317 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10318#ifdef VBOX_STRICT
10319 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
10320#endif
10321 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10322 {
10323 Assert(pVCpu->iem.s.cActiveMappings == 0);
10324 pVCpu->iem.s.cInstructions++;
10325
10326#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10327 /* Perform any VMX nested-guest instruction boundary actions. */
10328 uint64_t fCpu = pVCpu->fLocalForcedActions;
10329 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10330 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10331 { /* likely */ }
10332 else
10333 {
10334 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10335 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10336 fCpu = pVCpu->fLocalForcedActions;
10337 else
10338 {
10339 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10340 break;
10341 }
10342 }
10343#endif
10344 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10345 {
10346#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10347 uint64_t fCpu = pVCpu->fLocalForcedActions;
10348#endif
10349 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10350 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10351 | VMCPU_FF_TLB_FLUSH
10352 | VMCPU_FF_UNHALT );
10353
10354 if (RT_LIKELY( ( !fCpu
10355 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10356 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
10357 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
10358 {
10359 if (--cMaxInstructionsGccStupidity > 0)
10360 {
10361 /* Poll timers every now and then according to the caller's specs. */
10362 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
10363 || !TMTimerPollBool(pVM, pVCpu))
10364 {
10365 Assert(pVCpu->iem.s.cActiveMappings == 0);
10366 iemReInitDecoder(pVCpu);
10367 continue;
10368 }
10369 }
10370 }
10371 }
10372 Assert(pVCpu->iem.s.cActiveMappings == 0);
10373 }
10374 else if (pVCpu->iem.s.cActiveMappings > 0)
10375 iemMemRollback(pVCpu);
10376 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10377 break;
10378 }
10379 }
10380#ifdef IEM_WITH_SETJMP
10381 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10382 {
10383 if (pVCpu->iem.s.cActiveMappings > 0)
10384 iemMemRollback(pVCpu);
10385# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10386 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10387# endif
10388 pVCpu->iem.s.cLongJumps++;
10389 }
10390 IEM_CATCH_LONGJMP_END(pVCpu);
10391#endif
10392
10393 /*
10394 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10395 */
10396 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10397 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10398 }
10399 else
10400 {
10401 if (pVCpu->iem.s.cActiveMappings > 0)
10402 iemMemRollback(pVCpu);
10403
10404#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10405 /*
10406 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10407 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10408 */
10409 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10410#endif
10411 }
10412
10413 /*
10414 * Maybe re-enter raw-mode and log.
10415 */
10416 if (rcStrict != VINF_SUCCESS)
10417 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10418 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10419 if (pcInstructions)
10420 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
10421 return rcStrict;
10422}
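/*
 * Usage sketch (illustrative only, not part of the original sources): a
 * batch invocation.  Note the assertion at the top of the function:
 * cPollRate + 1 must be a power of two, so values like 511 or 1023 work;
 * the instruction counts below are made up for the example.
 *
 *     uint32_t     cInstructions = 0;
 *     VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096, 511, &cInstructions); // cMaxInstructions, cPollRate, pcInstructions
 */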
10423
10424
10425/**
10426 * Interface used by EMExecuteExec, does exit statistics and limits.
10427 *
10428 * @returns Strict VBox status code.
10429 * @param pVCpu The cross context virtual CPU structure.
10430 * @param fWillExit To be defined.
10431 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10432 * @param cMaxInstructions Maximum number of instructions to execute.
10433 * @param cMaxInstructionsWithoutExits
10434 * The max number of instructions without exits.
10435 * @param pStats Where to return statistics.
10436 */
10437VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10438 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10439{
10440 NOREF(fWillExit); /** @todo define flexible exit crits */
10441
10442 /*
10443 * Initialize return stats.
10444 */
10445 pStats->cInstructions = 0;
10446 pStats->cExits = 0;
10447 pStats->cMaxExitDistance = 0;
10448 pStats->cReserved = 0;
10449
10450 /*
10451 * Initial decoder init w/ prefetch, then setup setjmp.
10452 */
10453 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10454 if (rcStrict == VINF_SUCCESS)
10455 {
10456#ifdef IEM_WITH_SETJMP
10457 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
10458 IEM_TRY_SETJMP(pVCpu, rcStrict)
10459#endif
10460 {
10461#ifdef IN_RING0
10462 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10463#endif
10464 uint32_t cInstructionSinceLastExit = 0;
10465
10466 /*
10467 * The run loop. We limit ourselves to the instruction count specified by the caller (cMaxInstructions).
10468 */
10469 PVM pVM = pVCpu->CTX_SUFF(pVM);
10470 for (;;)
10471 {
10472 /*
10473 * Log the state.
10474 */
10475#ifdef LOG_ENABLED
10476 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10477#endif
10478
10479 /*
10480 * Do the decoding and emulation.
10481 */
10482 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10483
10484 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10485 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10486
10487 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10488 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10489 {
10490 pStats->cExits += 1;
10491 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10492 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10493 cInstructionSinceLastExit = 0;
10494 }
10495
10496 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10497 {
10498 Assert(pVCpu->iem.s.cActiveMappings == 0);
10499 pVCpu->iem.s.cInstructions++;
10500 pStats->cInstructions++;
10501 cInstructionSinceLastExit++;
10502
10503#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10504 /* Perform any VMX nested-guest instruction boundary actions. */
10505 uint64_t fCpu = pVCpu->fLocalForcedActions;
10506 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10507 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10508 { /* likely */ }
10509 else
10510 {
10511 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10512 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10513 fCpu = pVCpu->fLocalForcedActions;
10514 else
10515 {
10516 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10517 break;
10518 }
10519 }
10520#endif
10521 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10522 {
10523#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10524 uint64_t fCpu = pVCpu->fLocalForcedActions;
10525#endif
10526 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10527 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10528 | VMCPU_FF_TLB_FLUSH
10529 | VMCPU_FF_UNHALT );
10530 if (RT_LIKELY( ( ( !fCpu
10531 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10532 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10533 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10534 || pStats->cInstructions < cMinInstructions))
10535 {
10536 if (pStats->cInstructions < cMaxInstructions)
10537 {
10538 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10539 {
10540#ifdef IN_RING0
10541 if ( !fCheckPreemptionPending
10542 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10543#endif
10544 {
10545 Assert(pVCpu->iem.s.cActiveMappings == 0);
10546 iemReInitDecoder(pVCpu);
10547 continue;
10548 }
10549#ifdef IN_RING0
10550 rcStrict = VINF_EM_RAW_INTERRUPT;
10551 break;
10552#endif
10553 }
10554 }
10555 }
10556 Assert(!(fCpu & VMCPU_FF_IEM));
10557 }
10558 Assert(pVCpu->iem.s.cActiveMappings == 0);
10559 }
10560 else if (pVCpu->iem.s.cActiveMappings > 0)
10561 iemMemRollback(pVCpu);
10562 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10563 break;
10564 }
10565 }
10566#ifdef IEM_WITH_SETJMP
10567 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10568 {
10569 if (pVCpu->iem.s.cActiveMappings > 0)
10570 iemMemRollback(pVCpu);
10571 pVCpu->iem.s.cLongJumps++;
10572 }
10573 IEM_CATCH_LONGJMP_END(pVCpu);
10574#endif
10575
10576 /*
10577 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10578 */
10579 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10580 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10581 }
10582 else
10583 {
10584 if (pVCpu->iem.s.cActiveMappings > 0)
10585 iemMemRollback(pVCpu);
10586
10587#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10588 /*
10589 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10590 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10591 */
10592 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10593#endif
10594 }
10595
10596 /*
10597 * Maybe re-enter raw-mode and log.
10598 */
10599 if (rcStrict != VINF_SUCCESS)
10600 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10601 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10602 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10603 return rcStrict;
10604}
10605
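/*
 * Illustrative sketch (added for this write-up, not part of the original file):
 * one way a hypothetical ring-3 caller could drive IEMExecForExits and inspect
 * the returned statistics.  The helper name and the instruction budgets are
 * made up for the example.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleDriveIemForExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/,
                                            32   /*cMinInstructions*/,
                                            4096 /*cMaxInstructions*/,
                                            512  /*cMaxInstructionsWithoutExits*/,
                                            &Stats);
    LogFlow(("exampleDriveIemForExits: ins=%u exits=%u maxdist=%u -> %Rrc\n",
             Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif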
10606
10607/**
10608 * Injects a trap, fault, abort, software interrupt or external interrupt.
10609 *
10610 * The parameter list matches TRPMQueryTrapAll pretty closely.
10611 *
10612 * @returns Strict VBox status code.
10613 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10614 * @param u8TrapNo The trap number.
10615 * @param enmType What type is it (trap/fault/abort), software
10616 * interrupt or hardware interrupt.
10617 * @param uErrCode The error code if applicable.
10618 * @param uCr2 The CR2 value if applicable.
10619 * @param cbInstr The instruction length (only relevant for
10620 * software interrupts).
10621 */
10622VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10623 uint8_t cbInstr)
10624{
10625 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
10626#ifdef DBGFTRACE_ENABLED
10627 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10628 u8TrapNo, enmType, uErrCode, uCr2);
10629#endif
10630
10631 uint32_t fFlags;
10632 switch (enmType)
10633 {
10634 case TRPM_HARDWARE_INT:
10635 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10636 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10637 uErrCode = uCr2 = 0;
10638 break;
10639
10640 case TRPM_SOFTWARE_INT:
10641 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10642 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10643 uErrCode = uCr2 = 0;
10644 break;
10645
10646 case TRPM_TRAP:
10647 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10648 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10649 if (u8TrapNo == X86_XCPT_PF)
10650 fFlags |= IEM_XCPT_FLAGS_CR2;
10651 switch (u8TrapNo)
10652 {
10653 case X86_XCPT_DF:
10654 case X86_XCPT_TS:
10655 case X86_XCPT_NP:
10656 case X86_XCPT_SS:
10657 case X86_XCPT_PF:
10658 case X86_XCPT_AC:
10659 case X86_XCPT_GP:
10660 fFlags |= IEM_XCPT_FLAGS_ERR;
10661 break;
10662 }
10663 break;
10664
10665 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10666 }
10667
10668 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10669
10670 if (pVCpu->iem.s.cActiveMappings > 0)
10671 iemMemRollback(pVCpu);
10672
10673 return rcStrict;
10674}
10675
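/*
 * Illustrative sketch (added for this write-up, not part of the original file):
 * injecting a #PF with an error code and fault address through IEMInjectTrap.
 * The helper name, error code bits and CR2 value are made up for the example;
 * cbInstr is irrelevant here since this is not a software interrupt.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleInjectPageFault(PVMCPUCC pVCpu)
{
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
                         X86_TRAP_PF_P | X86_TRAP_PF_RW /*uErrCode*/,
                         UINT64_C(0x00007fffdead0000)   /*uCr2*/,
                         0                              /*cbInstr*/);
}
#endif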
10676
10677/**
10678 * Injects the active TRPM event.
10679 *
10680 * @returns Strict VBox status code.
10681 * @param pVCpu The cross context virtual CPU structure.
10682 */
10683VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10684{
10685#ifndef IEM_IMPLEMENTS_TASKSWITCH
10686 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10687#else
10688 uint8_t u8TrapNo;
10689 TRPMEVENT enmType;
10690 uint32_t uErrCode;
10691 RTGCUINTPTR uCr2;
10692 uint8_t cbInstr;
10693 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10694 if (RT_FAILURE(rc))
10695 return rc;
10696
10697 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10698 * ICEBP \#DB injection as a special case. */
10699 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10700#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10701 if (rcStrict == VINF_SVM_VMEXIT)
10702 rcStrict = VINF_SUCCESS;
10703#endif
10704#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10705 if (rcStrict == VINF_VMX_VMEXIT)
10706 rcStrict = VINF_SUCCESS;
10707#endif
10708 /** @todo Are there any other codes that imply the event was successfully
10709 * delivered to the guest? See @bugref{6607}. */
10710 if ( rcStrict == VINF_SUCCESS
10711 || rcStrict == VINF_IEM_RAISED_XCPT)
10712 TRPMResetTrap(pVCpu);
10713
10714 return rcStrict;
10715#endif
10716}
10717
10718
10719VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10720{
10721 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10722 return VERR_NOT_IMPLEMENTED;
10723}
10724
10725
10726VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10727{
10728 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10729 return VERR_NOT_IMPLEMENTED;
10730}
10731
10732
10733/**
10734 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10735 *
10736 * This API ASSUMES that the caller has already verified that the guest code is
10737 * allowed to access the I/O port. (The I/O port is in the DX register in the
10738 * guest state.)
10739 *
10740 * @returns Strict VBox status code.
10741 * @param pVCpu The cross context virtual CPU structure.
10742 * @param cbValue The size of the I/O port access (1, 2, or 4).
10743 * @param enmAddrMode The addressing mode.
10744 * @param fRepPrefix Indicates whether a repeat prefix is used
10745 * (doesn't matter which for this instruction).
10746 * @param cbInstr The instruction length in bytes.
10747 * @param iEffSeg The effective segment address.
10748 * @param fIoChecked Whether the access to the I/O port has been
10749 * checked or not. It's typically checked in the
10750 * HM scenario.
10751 */
10752VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10753 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10754{
10755 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10756 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10757
10758 /*
10759 * State init.
10760 */
10761 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10762
10763 /*
10764 * Switch orgy for getting to the right handler.
10765 */
10766 VBOXSTRICTRC rcStrict;
10767 if (fRepPrefix)
10768 {
10769 switch (enmAddrMode)
10770 {
10771 case IEMMODE_16BIT:
10772 switch (cbValue)
10773 {
10774 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10775 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10776 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10777 default:
10778 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10779 }
10780 break;
10781
10782 case IEMMODE_32BIT:
10783 switch (cbValue)
10784 {
10785 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10786 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10787 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10788 default:
10789 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10790 }
10791 break;
10792
10793 case IEMMODE_64BIT:
10794 switch (cbValue)
10795 {
10796 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10797 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10798 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10799 default:
10800 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10801 }
10802 break;
10803
10804 default:
10805 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10806 }
10807 }
10808 else
10809 {
10810 switch (enmAddrMode)
10811 {
10812 case IEMMODE_16BIT:
10813 switch (cbValue)
10814 {
10815 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10816 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10817 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10818 default:
10819 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10820 }
10821 break;
10822
10823 case IEMMODE_32BIT:
10824 switch (cbValue)
10825 {
10826 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10827 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10828 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10829 default:
10830 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10831 }
10832 break;
10833
10834 case IEMMODE_64BIT:
10835 switch (cbValue)
10836 {
10837 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10838 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10839 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10840 default:
10841 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10842 }
10843 break;
10844
10845 default:
10846 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10847 }
10848 }
10849
10850 if (pVCpu->iem.s.cActiveMappings)
10851 iemMemRollback(pVCpu);
10852
10853 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10854}
10855
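/*
 * Illustrative sketch (added for this write-up, not part of the original file):
 * how a hypothetical HM exit handler might forward a REP OUTSB (byte size,
 * 16-bit addressing, default DS segment, 2 byte instruction) to IEM after
 * having validated the I/O port access itself.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleForwardRepOutsb(PVMCPUCC pVCpu)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_16BIT, true /*fRepPrefix*/,
                                2 /*cbInstr*/, X86_SREG_DS /*iEffSeg*/, true /*fIoChecked*/);
}
#endif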
10856
10857/**
10858 * Interface for HM and EM for executing string I/O IN (read) instructions.
10859 *
10860 * This API ASSUMES that the caller has already verified that the guest code is
10861 * allowed to access the I/O port. (The I/O port is in the DX register in the
10862 * guest state.)
10863 *
10864 * @returns Strict VBox status code.
10865 * @param pVCpu The cross context virtual CPU structure.
10866 * @param cbValue The size of the I/O port access (1, 2, or 4).
10867 * @param enmAddrMode The addressing mode.
10868 * @param fRepPrefix Indicates whether a repeat prefix is used
10869 * (doesn't matter which for this instruction).
10870 * @param cbInstr The instruction length in bytes.
10871 * @param fIoChecked Whether the access to the I/O port has been
10872 * checked or not. It's typically checked in the
10873 * HM scenario.
10874 */
10875VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10876 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10877{
10878 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10879
10880 /*
10881 * State init.
10882 */
10883 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10884
10885 /*
10886 * Switch orgy for getting to the right handler.
10887 */
10888 VBOXSTRICTRC rcStrict;
10889 if (fRepPrefix)
10890 {
10891 switch (enmAddrMode)
10892 {
10893 case IEMMODE_16BIT:
10894 switch (cbValue)
10895 {
10896 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10897 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10898 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10899 default:
10900 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10901 }
10902 break;
10903
10904 case IEMMODE_32BIT:
10905 switch (cbValue)
10906 {
10907 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10908 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10909 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10910 default:
10911 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10912 }
10913 break;
10914
10915 case IEMMODE_64BIT:
10916 switch (cbValue)
10917 {
10918 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10919 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10920 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10921 default:
10922 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10923 }
10924 break;
10925
10926 default:
10927 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10928 }
10929 }
10930 else
10931 {
10932 switch (enmAddrMode)
10933 {
10934 case IEMMODE_16BIT:
10935 switch (cbValue)
10936 {
10937 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10938 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10939 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10940 default:
10941 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10942 }
10943 break;
10944
10945 case IEMMODE_32BIT:
10946 switch (cbValue)
10947 {
10948 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10949 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10950 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10951 default:
10952 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10953 }
10954 break;
10955
10956 case IEMMODE_64BIT:
10957 switch (cbValue)
10958 {
10959 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10960 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10961 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10962 default:
10963 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10964 }
10965 break;
10966
10967 default:
10968 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10969 }
10970 }
10971
10972 if ( pVCpu->iem.s.cActiveMappings == 0
10973 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10974 { /* likely */ }
10975 else
10976 {
10977 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10978 iemMemRollback(pVCpu);
10979 }
10980 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10981}
10982
10983
10984/**
10985 * Interface for raw-mode to execute a decoded OUT (write) instruction.
10986 *
10987 * @returns Strict VBox status code.
10988 * @param pVCpu The cross context virtual CPU structure.
10989 * @param cbInstr The instruction length in bytes.
10990 * @param u16Port The port to write to.
10991 * @param fImm Whether the port is specified using an immediate operand or
10992 * using the implicit DX register.
10993 * @param cbReg The register size.
10994 *
10995 * @remarks In ring-0 not all of the state needs to be synced in.
10996 */
10997VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10998{
10999 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11000 Assert(cbReg <= 4 && cbReg != 3);
11001
11002 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11003 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
11004 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
11005 Assert(!pVCpu->iem.s.cActiveMappings);
11006 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11007}
11008
11009
11010/**
11011 * Interface for raw-mode to execute a decoded IN (read) instruction.
11012 *
11013 * @returns Strict VBox status code.
11014 * @param pVCpu The cross context virtual CPU structure.
11015 * @param cbInstr The instruction length in bytes.
11016 * @param u16Port The port to read.
11017 * @param fImm Whether the port is specified using an immediate operand or
11018 * using the implicit DX.
11019 * @param cbReg The register size.
11020 */
11021VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
11022{
11023 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11024 Assert(cbReg <= 4 && cbReg != 3);
11025
11026 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11027 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
11028 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
11029 Assert(!pVCpu->iem.s.cActiveMappings);
11030 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11031}
11032
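/*
 * Illustrative sketch (added for this write-up, not part of the original file):
 * replaying a decoded 'in al, 60h' (2 byte instruction, immediate port, 1 byte
 * register) via IEMExecDecodedIn; IEMExecDecodedOut is used the same way for
 * the write direction.  The helper name and port are made up for the example.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleReplayInAl60(PVMCPUCC pVCpu)
{
    return IEMExecDecodedIn(pVCpu, 2 /*cbInstr*/, 0x60 /*u16Port*/, true /*fImm*/, 1 /*cbReg*/);
}
#endif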
11033
11034/**
11035 * Interface for HM and EM to write to a CRx register.
11036 *
11037 * @returns Strict VBox status code.
11038 * @param pVCpu The cross context virtual CPU structure.
11039 * @param cbInstr The instruction length in bytes.
11040 * @param iCrReg The control register number (destination).
11041 * @param iGReg The general purpose register number (source).
11042 *
11043 * @remarks In ring-0 not all of the state needs to be synced in.
11044 */
11045VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11046{
11047 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11048 Assert(iCrReg < 16);
11049 Assert(iGReg < 16);
11050
11051 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11052 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11053 Assert(!pVCpu->iem.s.cActiveMappings);
11054 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11055}
11056
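/*
 * Illustrative sketch (added for this write-up, not part of the original file):
 * replaying a decoded 'mov cr3, rax' (3 byte instruction) via
 * IEMExecDecodedMovCRxWrite.  The helper name is made up for the example.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleReplayMovCr3Rax(PVMCPUCC pVCpu)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg=CR3*/, X86_GREG_xAX /*iGReg*/);
}
#endif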
11057
11058/**
11059 * Interface for HM and EM to read from a CRx register.
11060 *
11061 * @returns Strict VBox status code.
11062 * @param pVCpu The cross context virtual CPU structure.
11063 * @param cbInstr The instruction length in bytes.
11064 * @param iGReg The general purpose register number (destination).
11065 * @param iCrReg The control register number (source).
11066 *
11067 * @remarks In ring-0 not all of the state needs to be synced in.
11068 */
11069VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11070{
11071 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11072 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
11073 | CPUMCTX_EXTRN_APIC_TPR);
11074 Assert(iCrReg < 16);
11075 Assert(iGReg < 16);
11076
11077 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11078 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11079 Assert(!pVCpu->iem.s.cActiveMappings);
11080 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11081}
11082
11083
11084/**
11085 * Interface for HM and EM to write to a DRx register.
11086 *
11087 * @returns Strict VBox status code.
11088 * @param pVCpu The cross context virtual CPU structure.
11089 * @param cbInstr The instruction length in bytes.
11090 * @param iDrReg The debug register number (destination).
11091 * @param iGReg The general purpose register number (source).
11092 *
11093 * @remarks In ring-0 not all of the state needs to be synced in.
11094 */
11095VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
11096{
11097 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11098 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11099 Assert(iDrReg < 8);
11100 Assert(iGReg < 16);
11101
11102 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11103 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
11104 Assert(!pVCpu->iem.s.cActiveMappings);
11105 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11106}
11107
11108
11109/**
11110 * Interface for HM and EM to read from a DRx register.
11111 *
11112 * @returns Strict VBox status code.
11113 * @param pVCpu The cross context virtual CPU structure.
11114 * @param cbInstr The instruction length in bytes.
11115 * @param iGReg The general purpose register number (destination).
11116 * @param iDrReg The debug register number (source).
11117 *
11118 * @remarks In ring-0 not all of the state needs to be synced in.
11119 */
11120VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
11121{
11122 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11123 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11124 Assert(iDrReg < 8);
11125 Assert(iGReg < 16);
11126
11127 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11128 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
11129 Assert(!pVCpu->iem.s.cActiveMappings);
11130 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11131}
11132
11133
11134/**
11135 * Interface for HM and EM to clear the CR0[TS] bit.
11136 *
11137 * @returns Strict VBox status code.
11138 * @param pVCpu The cross context virtual CPU structure.
11139 * @param cbInstr The instruction length in bytes.
11140 *
11141 * @remarks In ring-0 not all of the state needs to be synced in.
11142 */
11143VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
11144{
11145 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11146
11147 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11148 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11149 Assert(!pVCpu->iem.s.cActiveMappings);
11150 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11151}
11152
11153
11154/**
11155 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11156 *
11157 * @returns Strict VBox status code.
11158 * @param pVCpu The cross context virtual CPU structure.
11159 * @param cbInstr The instruction length in bytes.
11160 * @param uValue The value to load into CR0.
11161 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
11162 * memory operand. Otherwise pass NIL_RTGCPTR.
11163 *
11164 * @remarks In ring-0 not all of the state needs to be synced in.
11165 */
11166VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
11167{
11168 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11169
11170 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11171 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
11172 Assert(!pVCpu->iem.s.cActiveMappings);
11173 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11174}
11175
11176
11177/**
11178 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11179 *
11180 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11181 *
11182 * @returns Strict VBox status code.
11183 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11184 * @param cbInstr The instruction length in bytes.
11185 * @remarks In ring-0 not all of the state needs to be synced in.
11186 * @thread EMT(pVCpu)
11187 */
11188VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
11189{
11190 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11191
11192 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11193 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11194 Assert(!pVCpu->iem.s.cActiveMappings);
11195 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11196}
11197
11198
11199/**
11200 * Interface for HM and EM to emulate the WBINVD instruction.
11201 *
11202 * @returns Strict VBox status code.
11203 * @param pVCpu The cross context virtual CPU structure.
11204 * @param cbInstr The instruction length in bytes.
11205 *
11206 * @remarks In ring-0 not all of the state needs to be synced in.
11207 */
11208VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11209{
11210 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11211
11212 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11213 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
11214 Assert(!pVCpu->iem.s.cActiveMappings);
11215 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11216}
11217
11218
11219/**
11220 * Interface for HM and EM to emulate the INVD instruction.
11221 *
11222 * @returns Strict VBox status code.
11223 * @param pVCpu The cross context virtual CPU structure.
11224 * @param cbInstr The instruction length in bytes.
11225 *
11226 * @remarks In ring-0 not all of the state needs to be synced in.
11227 */
11228VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11229{
11230 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11231
11232 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11233 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
11234 Assert(!pVCpu->iem.s.cActiveMappings);
11235 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11236}
11237
11238
11239/**
11240 * Interface for HM and EM to emulate the INVLPG instruction.
11241 *
11242 * @returns Strict VBox status code.
11243 * @retval VINF_PGM_SYNC_CR3
11244 *
11245 * @param pVCpu The cross context virtual CPU structure.
11246 * @param cbInstr The instruction length in bytes.
11247 * @param GCPtrPage The effective address of the page to invalidate.
11248 *
11249 * @remarks In ring-0 not all of the state needs to be synced in.
11250 */
11251VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
11252{
11253 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11254
11255 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11256 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
11257 Assert(!pVCpu->iem.s.cActiveMappings);
11258 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11259}
11260
11261
11262/**
11263 * Interface for HM and EM to emulate the INVPCID instruction.
11264 *
11265 * @returns Strict VBox status code.
11266 * @retval VINF_PGM_SYNC_CR3
11267 *
11268 * @param pVCpu The cross context virtual CPU structure.
11269 * @param cbInstr The instruction length in bytes.
11270 * @param iEffSeg The effective segment register.
11271 * @param GCPtrDesc The effective address of the INVPCID descriptor.
11272 * @param uType The invalidation type.
11273 *
11274 * @remarks In ring-0 not all of the state needs to be synced in.
11275 */
11276VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
11277 uint64_t uType)
11278{
11279 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
11280
11281 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11282 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
11283 Assert(!pVCpu->iem.s.cActiveMappings);
11284 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11285}
11286
11287
11288/**
11289 * Interface for HM and EM to emulate the CPUID instruction.
11290 *
11291 * @returns Strict VBox status code.
11292 *
11293 * @param pVCpu The cross context virtual CPU structure.
11294 * @param cbInstr The instruction length in bytes.
11295 *
11296 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
11297 */
11298VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
11299{
11300 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11301 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
11302
11303 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11304 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
11305 Assert(!pVCpu->iem.s.cActiveMappings);
11306 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11307}
11308
11309
11310/**
11311 * Interface for HM and EM to emulate the RDPMC instruction.
11312 *
11313 * @returns Strict VBox status code.
11314 *
11315 * @param pVCpu The cross context virtual CPU structure.
11316 * @param cbInstr The instruction length in bytes.
11317 *
11318 * @remarks Not all of the state needs to be synced in.
11319 */
11320VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
11321{
11322 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11323 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11324
11325 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11326 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
11327 Assert(!pVCpu->iem.s.cActiveMappings);
11328 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11329}
11330
11331
11332/**
11333 * Interface for HM and EM to emulate the RDTSC instruction.
11334 *
11335 * @returns Strict VBox status code.
11336 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11337 *
11338 * @param pVCpu The cross context virtual CPU structure.
11339 * @param cbInstr The instruction length in bytes.
11340 *
11341 * @remarks Not all of the state needs to be synced in.
11342 */
11343VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
11344{
11345 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11346 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11347
11348 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11349 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
11350 Assert(!pVCpu->iem.s.cActiveMappings);
11351 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11352}
11353
11354
11355/**
11356 * Interface for HM and EM to emulate the RDTSCP instruction.
11357 *
11358 * @returns Strict VBox status code.
11359 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11360 *
11361 * @param pVCpu The cross context virtual CPU structure.
11362 * @param cbInstr The instruction length in bytes.
11363 *
11364 * @remarks Not all of the state needs to be synced in. Recommended
11365 *          to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
11366 */
11367VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
11368{
11369 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11370 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11371
11372 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11373 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11374 Assert(!pVCpu->iem.s.cActiveMappings);
11375 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11376}
11377
11378
11379/**
11380 * Interface for HM and EM to emulate the RDMSR instruction.
11381 *
11382 * @returns Strict VBox status code.
11383 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11384 *
11385 * @param pVCpu The cross context virtual CPU structure.
11386 * @param cbInstr The instruction length in bytes.
11387 *
11388 * @remarks Not all of the state needs to be synced in. Requires RCX and
11389 * (currently) all MSRs.
11390 */
11391VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11392{
11393 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11394 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11395
11396 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11397 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11398 Assert(!pVCpu->iem.s.cActiveMappings);
11399 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11400}
11401
11402
11403/**
11404 * Interface for HM and EM to emulate the WRMSR instruction.
11405 *
11406 * @returns Strict VBox status code.
11407 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11408 *
11409 * @param pVCpu The cross context virtual CPU structure.
11410 * @param cbInstr The instruction length in bytes.
11411 *
11412 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11413 * and (currently) all MSRs.
11414 */
11415VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11416{
11417 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11418 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11419 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11420
11421 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11422 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11423 Assert(!pVCpu->iem.s.cActiveMappings);
11424 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11425}
11426
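/*
 * Illustrative sketch (added for this write-up, not part of the original file):
 * replaying a decoded WRMSR (2 byte instruction).  Note that the MSR index and
 * value are taken from the guest context rather than passed as parameters, so
 * RCX, RAX and RDX must be up to date before the call (see the @remarks above).
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleReplayWrmsr(PVMCPUCC pVCpu)
{
    /* ecx = MSR index, edx:eax = value to write; both come from pVCpu->cpum.GstCtx. */
    return IEMExecDecodedWrmsr(pVCpu, 2 /*cbInstr*/);
}
#endif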
11427
11428/**
11429 * Interface for HM and EM to emulate the MONITOR instruction.
11430 *
11431 * @returns Strict VBox status code.
11432 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11433 *
11434 * @param pVCpu The cross context virtual CPU structure.
11435 * @param cbInstr The instruction length in bytes.
11436 *
11437 * @remarks Not all of the state needs to be synced in.
11438 * @remarks ASSUMES the default segment of DS and no segment override prefixes
11439 * are used.
11440 */
11441VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11442{
11443 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11444 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11445
11446 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11447 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11448 Assert(!pVCpu->iem.s.cActiveMappings);
11449 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11450}
11451
11452
11453/**
11454 * Interface for HM and EM to emulate the MWAIT instruction.
11455 *
11456 * @returns Strict VBox status code.
11457 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11458 *
11459 * @param pVCpu The cross context virtual CPU structure.
11460 * @param cbInstr The instruction length in bytes.
11461 *
11462 * @remarks Not all of the state needs to be synced in.
11463 */
11464VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11465{
11466 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11467 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11468
11469 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11470 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11471 Assert(!pVCpu->iem.s.cActiveMappings);
11472 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11473}
11474
11475
11476/**
11477 * Interface for HM and EM to emulate the HLT instruction.
11478 *
11479 * @returns Strict VBox status code.
11480 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11481 *
11482 * @param pVCpu The cross context virtual CPU structure.
11483 * @param cbInstr The instruction length in bytes.
11484 *
11485 * @remarks Not all of the state needs to be synced in.
11486 */
11487VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11488{
11489 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11490
11491 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11492 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11493 Assert(!pVCpu->iem.s.cActiveMappings);
11494 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11495}
11496
11497
11498/**
11499 * Checks if IEM is in the process of delivering an event (interrupt or
11500 * exception).
11501 *
11502 * @returns true if we're in the process of raising an interrupt or exception,
11503 * false otherwise.
11504 * @param pVCpu The cross context virtual CPU structure.
11505 * @param puVector Where to store the vector associated with the
11506 * currently delivered event, optional.
11507 * @param pfFlags Where to store the event delivery flags (see
11508 * IEM_XCPT_FLAGS_XXX), optional.
11509 * @param puErr Where to store the error code associated with the
11510 * event, optional.
11511 * @param puCr2 Where to store the CR2 associated with the event,
11512 * optional.
11513 * @remarks The caller should check the flags to determine if the error code and
11514 * CR2 are valid for the event.
11515 */
11516VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11517{
11518 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11519 if (fRaisingXcpt)
11520 {
11521 if (puVector)
11522 *puVector = pVCpu->iem.s.uCurXcpt;
11523 if (pfFlags)
11524 *pfFlags = pVCpu->iem.s.fCurXcpt;
11525 if (puErr)
11526 *puErr = pVCpu->iem.s.uCurXcptErr;
11527 if (puCr2)
11528 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11529 }
11530 return fRaisingXcpt;
11531}
11532
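/*
 * Illustrative sketch (added for this write-up, not part of the original file):
 * querying whether IEM is in the middle of delivering an event, e.g. when
 * deciding how to report a nested fault.  All output parameters are optional.
 */
#if 0 /* example only */
static void exampleLogPendingXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        LogFlow(("Delivering vector %#x fFlags=%#x uErr=%#x uCr2=%#RX64\n", uVector, fFlags, uErr, uCr2));
}
#endif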
11533#ifdef IN_RING3
11534
11535/**
11536 * Handles the unlikely and probably fatal merge cases.
11537 *
11538 * @returns Merged status code.
11539 * @param rcStrict Current EM status code.
11540 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11541 * with @a rcStrict.
11542 * @param iMemMap The memory mapping index. For error reporting only.
11543 * @param pVCpu The cross context virtual CPU structure of the calling
11544 * thread, for error reporting only.
11545 */
11546DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11547 unsigned iMemMap, PVMCPUCC pVCpu)
11548{
11549 if (RT_FAILURE_NP(rcStrict))
11550 return rcStrict;
11551
11552 if (RT_FAILURE_NP(rcStrictCommit))
11553 return rcStrictCommit;
11554
11555 if (rcStrict == rcStrictCommit)
11556 return rcStrictCommit;
11557
11558 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11559 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11560 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11561 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11562 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11563 return VERR_IOM_FF_STATUS_IPE;
11564}
11565
11566
11567/**
11568 * Helper for IOMR3ProcessForceFlag.
11569 *
11570 * @returns Merged status code.
11571 * @param rcStrict Current EM status code.
11572 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11573 * with @a rcStrict.
11574 * @param iMemMap The memory mapping index. For error reporting only.
11575 * @param pVCpu The cross context virtual CPU structure of the calling
11576 * thread, for error reporting only.
11577 */
11578DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11579{
11580 /* Simple. */
11581 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11582 return rcStrictCommit;
11583
11584 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11585 return rcStrict;
11586
11587 /* EM scheduling status codes. */
11588 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11589 && rcStrict <= VINF_EM_LAST))
11590 {
11591 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11592 && rcStrictCommit <= VINF_EM_LAST))
11593 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11594 }
11595
11596 /* Unlikely */
11597 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11598}
11599
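/*
 * Worked example (added for clarity, not part of the original file): if the
 * ring-0/raw-mode loop returned VINF_EM_RAW_TO_R3 and the deferred commit
 * returns VINF_SUCCESS, the first test above simply returns the commit status,
 * since the requested trip to ring-3 has already happened by the time this
 * code runs.  If both sides are EM scheduling codes in the
 * VINF_EM_FIRST..VINF_EM_LAST range, the numerically smaller of the two is
 * returned, which by EM convention is the higher priority request.
 */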
11600
11601/**
11602 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11603 *
11604 * @returns Merge between @a rcStrict and what the commit operation returned.
11605 * @param pVM The cross context VM structure.
11606 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11607 * @param rcStrict The status code returned by ring-0 or raw-mode.
11608 */
11609VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11610{
11611 /*
11612 * Reset the pending commit.
11613 */
11614 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11615 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11616 ("%#x %#x %#x\n",
11617 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11618 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11619
11620 /*
11621 * Commit the pending bounce buffers (usually just one).
11622 */
11623 unsigned cBufs = 0;
11624 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11625 while (iMemMap-- > 0)
11626 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11627 {
11628 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11629 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11630 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11631
11632 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11633 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11634 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11635
11636 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11637 {
11638 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11639 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11640 pbBuf,
11641 cbFirst,
11642 PGMACCESSORIGIN_IEM);
11643 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11644 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11645 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11646 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11647 }
11648
11649 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11650 {
11651 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11652 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11653 pbBuf + cbFirst,
11654 cbSecond,
11655 PGMACCESSORIGIN_IEM);
11656 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11657 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11658 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11659 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11660 }
11661 cBufs++;
11662 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11663 }
11664
11665 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11666 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11667 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11668 pVCpu->iem.s.cActiveMappings = 0;
11669 return rcStrict;
11670}
11671
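/*
 * Illustrative sketch (added for this write-up, not part of the original file):
 * the ring-3 force-flag processing path would typically invoke the function
 * above along these lines after returning from ring-0 execution; 'rcStrict'
 * is the status handed back by ring-0 or raw-mode.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleProcessIemForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif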
11672#endif /* IN_RING3 */
11673