VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@102850

Last change on this file since 102850 was 102850, checked in by vboxsync, 15 months ago

VMM/IEM: Implemented the first of two code TLB lookups. bugref:10371

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 442.8 KB
 
1/* $Id: IEMAll.cpp 102850 2024-01-12 00:47:47Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the "IEM" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The \"IEM_MEM\" log group covers most of memory related details logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
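/* Example (illustrative note, not part of the original source): with LOG_GROUP
 * set to LOG_GROUP_IEM as done further down in this file, the level assignments
 * above map onto the IPRT logging macros roughly like this; the messages and
 * variable names (uCs, uRip, rc) are made up for illustration:
 *
 *     Log(("iemRaiseXcptOrInt: #GP(0) at %04x:%RX64\n", uCs, uRip));   - level 1: exceptions and other major events
 *     LogFlow(("IEMExecOne: returns %Rrc\n", rc));                     - flow: basic enter/exit info
 *     Log4(("decode: %04x:%RX64 mov eax, ebx\n", uCs, uRip));          - level 4: decoded mnemonics w/ EIP
 *     Log10(("IEMTlbInvalidateAll\n"));                                - level 10: TLB activity
 */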
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gim.h>
134#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
135# include <VBox/vmm/em.h>
136# include <VBox/vmm/hm_svm.h>
137#endif
138#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
139# include <VBox/vmm/hmvmxinline.h>
140#endif
141#include <VBox/vmm/tm.h>
142#include <VBox/vmm/dbgf.h>
143#include <VBox/vmm/dbgftrace.h>
144#include "IEMInternal.h"
145#include <VBox/vmm/vmcc.h>
146#include <VBox/log.h>
147#include <VBox/err.h>
148#include <VBox/param.h>
149#include <VBox/dis.h>
150#include <iprt/asm-math.h>
151#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
152# include <iprt/asm-amd64-x86.h>
153#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
154# include <iprt/asm-arm.h>
155#endif
156#include <iprt/assert.h>
157#include <iprt/string.h>
158#include <iprt/x86.h>
159
160#include "IEMInline.h"
161
162
163/*********************************************************************************************************************************
164* Structures and Typedefs *
165*********************************************************************************************************************************/
166/**
167 * CPU exception classes.
168 */
169typedef enum IEMXCPTCLASS
170{
171 IEMXCPTCLASS_BENIGN,
172 IEMXCPTCLASS_CONTRIBUTORY,
173 IEMXCPTCLASS_PAGE_FAULT,
174 IEMXCPTCLASS_DOUBLE_FAULT
175} IEMXCPTCLASS;
176
177
178/*********************************************************************************************************************************
179* Global Variables *
180*********************************************************************************************************************************/
181#if defined(IEM_LOG_MEMORY_WRITES)
182/** What IEM just wrote. */
183uint8_t g_abIemWrote[256];
184/** How much IEM just wrote. */
185size_t g_cbIemWrote;
186#endif
187
188
189/*********************************************************************************************************************************
190* Internal Functions *
191*********************************************************************************************************************************/
192static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
193 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
194
195
196/**
197 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
198 * path.
199 *
200 * @returns IEM_F_BRK_PENDING_XXX or zero.
201 * @param pVCpu The cross context virtual CPU structure of the
202 * calling thread.
203 *
204 * @note Don't call directly, use iemCalcExecDbgFlags instead.
205 */
206uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
207{
208 uint32_t fExec = 0;
209
210 /*
211 * Process guest breakpoints.
212 */
213#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
214 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
215 { \
216 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
217 { \
218 case X86_DR7_RW_EO: \
219 fExec |= IEM_F_PENDING_BRK_INSTR; \
220 break; \
221 case X86_DR7_RW_WO: \
222 case X86_DR7_RW_RW: \
223 fExec |= IEM_F_PENDING_BRK_DATA; \
224 break; \
225 case X86_DR7_RW_IO: \
226 fExec |= IEM_F_PENDING_BRK_X86_IO; \
227 break; \
228 } \
229 } \
230 } while (0)
231
232 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
233 if (fGstDr7 & X86_DR7_ENABLED_MASK)
234 {
235 PROCESS_ONE_BP(fGstDr7, 0);
236 PROCESS_ONE_BP(fGstDr7, 1);
237 PROCESS_ONE_BP(fGstDr7, 2);
238 PROCESS_ONE_BP(fGstDr7, 3);
239 }
240
241 /*
242 * Process hypervisor breakpoints.
243 */
244 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
245 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
246 {
247 PROCESS_ONE_BP(fHyperDr7, 0);
248 PROCESS_ONE_BP(fHyperDr7, 1);
249 PROCESS_ONE_BP(fHyperDr7, 2);
250 PROCESS_ONE_BP(fHyperDr7, 3);
251 }
252
253 return fExec;
254}
255
256
257/**
258 * Initializes the decoder state.
259 *
260 * iemReInitDecoder is mostly a copy of this function.
261 *
262 * @param pVCpu The cross context virtual CPU structure of the
263 * calling thread.
264 * @param fExecOpts Optional execution flags:
265 * - IEM_F_BYPASS_HANDLERS
266 * - IEM_F_X86_DISREGARD_LOCK
267 */
268DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
269{
270 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
271 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
272 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
274 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
276 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
278 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
279 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
280
281 /* Execution state: */
282 uint32_t fExec;
283 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
284
285 /* Decoder state: */
286 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
287 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
288 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
289 {
290 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
291 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
292 }
293 else
294 {
295 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
296 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
297 }
298 pVCpu->iem.s.fPrefixes = 0;
299 pVCpu->iem.s.uRexReg = 0;
300 pVCpu->iem.s.uRexB = 0;
301 pVCpu->iem.s.uRexIndex = 0;
302 pVCpu->iem.s.idxPrefix = 0;
303 pVCpu->iem.s.uVex3rdReg = 0;
304 pVCpu->iem.s.uVexLength = 0;
305 pVCpu->iem.s.fEvexStuff = 0;
306 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
307#ifdef IEM_WITH_CODE_TLB
308 pVCpu->iem.s.pbInstrBuf = NULL;
309 pVCpu->iem.s.offInstrNextByte = 0;
310 pVCpu->iem.s.offCurInstrStart = 0;
311# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
312 pVCpu->iem.s.offOpcode = 0;
313# endif
314# ifdef VBOX_STRICT
315 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
316 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
317 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
318 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
319# endif
320#else
321 pVCpu->iem.s.offOpcode = 0;
322 pVCpu->iem.s.cbOpcode = 0;
323#endif
324 pVCpu->iem.s.offModRm = 0;
325 pVCpu->iem.s.cActiveMappings = 0;
326 pVCpu->iem.s.iNextMapping = 0;
327 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
328
329#ifdef DBGFTRACE_ENABLED
330 switch (IEM_GET_CPU_MODE(pVCpu))
331 {
332 case IEMMODE_64BIT:
333 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
334 break;
335 case IEMMODE_32BIT:
336 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
337 break;
338 case IEMMODE_16BIT:
339 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
340 break;
341 }
342#endif
343}
344
345
346/**
347 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
348 *
349 * This is mostly a copy of iemInitDecoder.
350 *
351 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
352 */
353DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
354{
355 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
356 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
357 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
358 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
359 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
360 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
361 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
364
365 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
366 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
367 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
368
369 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
370 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
371 pVCpu->iem.s.enmEffAddrMode = enmMode;
372 if (enmMode != IEMMODE_64BIT)
373 {
374 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
375 pVCpu->iem.s.enmEffOpSize = enmMode;
376 }
377 else
378 {
379 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
380 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
381 }
382 pVCpu->iem.s.fPrefixes = 0;
383 pVCpu->iem.s.uRexReg = 0;
384 pVCpu->iem.s.uRexB = 0;
385 pVCpu->iem.s.uRexIndex = 0;
386 pVCpu->iem.s.idxPrefix = 0;
387 pVCpu->iem.s.uVex3rdReg = 0;
388 pVCpu->iem.s.uVexLength = 0;
389 pVCpu->iem.s.fEvexStuff = 0;
390 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
391#ifdef IEM_WITH_CODE_TLB
392 if (pVCpu->iem.s.pbInstrBuf)
393 {
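 /* If the previous instruction buffer still covers the new RIP, keep it and
    just adjust the offsets; cbInstrBuf is limited to off + 15 (or the total
    buffer size) below since no x86 instruction is longer than 15 bytes.
    Otherwise drop the buffer so the next opcode fetch re-establishes it. */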
394 uint64_t off = (enmMode == IEMMODE_64BIT
395 ? pVCpu->cpum.GstCtx.rip
396 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
397 - pVCpu->iem.s.uInstrBufPc;
398 if (off < pVCpu->iem.s.cbInstrBufTotal)
399 {
400 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
401 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
402 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
403 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
404 else
405 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
406 }
407 else
408 {
409 pVCpu->iem.s.pbInstrBuf = NULL;
410 pVCpu->iem.s.offInstrNextByte = 0;
411 pVCpu->iem.s.offCurInstrStart = 0;
412 pVCpu->iem.s.cbInstrBuf = 0;
413 pVCpu->iem.s.cbInstrBufTotal = 0;
414 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
415 }
416 }
417 else
418 {
419 pVCpu->iem.s.offInstrNextByte = 0;
420 pVCpu->iem.s.offCurInstrStart = 0;
421 pVCpu->iem.s.cbInstrBuf = 0;
422 pVCpu->iem.s.cbInstrBufTotal = 0;
423# ifdef VBOX_STRICT
424 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
425# endif
426 }
427# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
428 pVCpu->iem.s.offOpcode = 0;
429# endif
430#else /* !IEM_WITH_CODE_TLB */
431 pVCpu->iem.s.cbOpcode = 0;
432 pVCpu->iem.s.offOpcode = 0;
433#endif /* !IEM_WITH_CODE_TLB */
434 pVCpu->iem.s.offModRm = 0;
435 Assert(pVCpu->iem.s.cActiveMappings == 0);
436 pVCpu->iem.s.iNextMapping = 0;
437 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
438 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
439
440#ifdef DBGFTRACE_ENABLED
441 switch (enmMode)
442 {
443 case IEMMODE_64BIT:
444 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
445 break;
446 case IEMMODE_32BIT:
447 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
448 break;
449 case IEMMODE_16BIT:
450 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
451 break;
452 }
453#endif
454}
455
456
457
458/**
459 * Prefetches opcodes the first time execution is started.
460 *
461 * @returns Strict VBox status code.
462 * @param pVCpu The cross context virtual CPU structure of the
463 * calling thread.
464 * @param fExecOpts Optional execution flags:
465 * - IEM_F_BYPASS_HANDLERS
466 * - IEM_F_X86_DISREGARD_LOCK
467 */
468static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
469{
470 iemInitDecoder(pVCpu, fExecOpts);
471
472#ifndef IEM_WITH_CODE_TLB
473 /*
474 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
475 *
476 * First translate CS:rIP to a physical address.
477 *
478 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
479 * all relevant bytes from the first page, as it ASSUMES it's only ever
480 * called for dealing with CS.LIM, page crossing and instructions that
481 * are too long.
482 */
483 uint32_t cbToTryRead;
484 RTGCPTR GCPtrPC;
485 if (IEM_IS_64BIT_CODE(pVCpu))
486 {
487 cbToTryRead = GUEST_PAGE_SIZE;
488 GCPtrPC = pVCpu->cpum.GstCtx.rip;
489 if (IEM_IS_CANONICAL(GCPtrPC))
490 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
491 else
492 return iemRaiseGeneralProtectionFault0(pVCpu);
493 }
494 else
495 {
496 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
497 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
498 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
499 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
500 else
501 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
502 if (cbToTryRead) { /* likely */ }
503 else /* overflowed */
504 {
505 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
506 cbToTryRead = UINT32_MAX;
507 }
508 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
509 Assert(GCPtrPC <= UINT32_MAX);
510 }
511
512 PGMPTWALK Walk;
513 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
514 if (RT_SUCCESS(rc))
515 Assert(Walk.fSucceeded); /* probable. */
516 else
517 {
518 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
519# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
520 if (Walk.fFailed & PGM_WALKFAIL_EPT)
521 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
522# endif
523 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
524 }
525 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
526 else
527 {
528 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
529# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
530 if (Walk.fFailed & PGM_WALKFAIL_EPT)
531 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
532# endif
533 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
534 }
535 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
536 else
537 {
538 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
539# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
540 if (Walk.fFailed & PGM_WALKFAIL_EPT)
541 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
542# endif
543 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
544 }
545 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
546 /** @todo Check reserved bits and such stuff. PGM is better at doing
547 * that, so do it when implementing the guest virtual address
548 * TLB... */
549
550 /*
551 * Read the bytes at this address.
552 */
553 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
554 if (cbToTryRead > cbLeftOnPage)
555 cbToTryRead = cbLeftOnPage;
556 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
557 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
558
559 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
560 {
561 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
562 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
563 { /* likely */ }
564 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
565 {
566 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
567 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
568 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
569 }
570 else
571 {
572 Log((RT_SUCCESS(rcStrict)
573 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
574 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
575 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
576 return rcStrict;
577 }
578 }
579 else
580 {
581 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
582 if (RT_SUCCESS(rc))
583 { /* likely */ }
584 else
585 {
586 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
587 GCPtrPC, GCPhys, cbToTryRead, rc));
588 return rc;
589 }
590 }
591 pVCpu->iem.s.cbOpcode = cbToTryRead;
592#endif /* !IEM_WITH_CODE_TLB */
593 return VINF_SUCCESS;
594}
595
596
597/**
598 * Invalidates the IEM TLBs.
599 *
600 * This is called internally as well as by PGM when moving GC mappings.
601 *
602 * @param pVCpu The cross context virtual CPU structure of the calling
603 * thread.
604 */
605VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
606{
607#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
608 Log10(("IEMTlbInvalidateAll\n"));
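 /* The TLB entries are tagged with the page address OR'ed together with the
    current revision (see IEMTlbInvalidatePage below), so bumping the revision
    lazily invalidates every entry.  Only on the rare rollover to zero do the
    tags have to be cleared explicitly. */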
609# ifdef IEM_WITH_CODE_TLB
610 pVCpu->iem.s.cbInstrBufTotal = 0;
611 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
612 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
613 { /* very likely */ }
614 else
615 {
616 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
617 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
618 while (i-- > 0)
619 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
620 }
621# endif
622
623# ifdef IEM_WITH_DATA_TLB
624 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
625 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
626 { /* very likely */ }
627 else
628 {
629 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
630 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
631 while (i-- > 0)
632 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
633 }
634# endif
635#else
636 RT_NOREF(pVCpu);
637#endif
638}
639
640
641/**
642 * Invalidates a page in the TLBs.
643 *
644 * @param pVCpu The cross context virtual CPU structure of the calling
645 * thread.
646 * @param GCPtr The address of the page to invalidate
647 * @thread EMT(pVCpu)
648 */
649VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
650{
651#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
652 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
653 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
654 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
655 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
656
657# ifdef IEM_WITH_CODE_TLB
658 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
659 {
660 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
661 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
662 pVCpu->iem.s.cbInstrBufTotal = 0;
663 }
664# endif
665
666# ifdef IEM_WITH_DATA_TLB
667 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
668 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
669# endif
670#else
671 NOREF(pVCpu); NOREF(GCPtr);
672#endif
673}
674
675
676#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
677/**
678 * Invalidates both TLBs the slow way following a rollover.
679 *
680 * Worker for IEMTlbInvalidateAllPhysical,
681 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
682 * iemMemMapJmp and others.
683 *
684 * @thread EMT(pVCpu)
685 */
686static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
687{
688 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
689 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
690 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
691
692 unsigned i;
693# ifdef IEM_WITH_CODE_TLB
694 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
695 while (i-- > 0)
696 {
697 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
698 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
699 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
700 }
701# endif
702# ifdef IEM_WITH_DATA_TLB
703 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
704 while (i-- > 0)
705 {
706 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
707 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
708 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
709 }
710# endif
711
712}
713#endif
714
715
716/**
717 * Invalidates the host physical aspects of the IEM TLBs.
718 *
719 * This is called internally as well as by PGM when moving GC mappings.
720 *
721 * @param pVCpu The cross context virtual CPU structure of the calling
722 * thread.
723 * @note Currently not used.
724 */
725VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
726{
727#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
728 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
729 Log10(("IEMTlbInvalidateAllPhysical\n"));
730
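 /* Same lazy scheme as the virtual revisions above: advancing uTlbPhysRev makes
    all cached physical mappings stale.  If the increment would wrap into the
    reserved low range, fall back to clearing the physical bits of every entry
    in IEMTlbInvalidateAllPhysicalSlow. */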
731# ifdef IEM_WITH_CODE_TLB
732 pVCpu->iem.s.cbInstrBufTotal = 0;
733# endif
734 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
735 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
736 {
737 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
738 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
739 }
740 else
741 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
742#else
743 NOREF(pVCpu);
744#endif
745}
746
747
748/**
749 * Invalidates the host physical aspects of the IEM TLBs.
750 *
751 * This is called internally as well as by PGM when moving GC mappings.
752 *
753 * @param pVM The cross context VM structure.
754 * @param idCpuCaller The ID of the calling EMT if available to the caller,
755 * otherwise NIL_VMCPUID.
756 * @param enmReason The reason we're called.
757 *
758 * @remarks Caller holds the PGM lock.
759 */
760VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
761{
762#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
763 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
764 if (pVCpuCaller)
765 VMCPU_ASSERT_EMT(pVCpuCaller);
766 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
767
768 VMCC_FOR_EACH_VMCPU(pVM)
769 {
770# ifdef IEM_WITH_CODE_TLB
771 if (pVCpuCaller == pVCpu)
772 pVCpu->iem.s.cbInstrBufTotal = 0;
773# endif
774
775 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
776 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
777 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
778 { /* likely */}
779 else if (pVCpuCaller != pVCpu)
780 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
781 else
782 {
783 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
784 continue;
785 }
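 /* Publish the new revision with a compare-exchange so we do not clobber a
    revision another EMT has bumped in the meantime; the calling EMT handles
    its own rollover via the slow path above, while remote EMTs are simply
    reset to the first valid revision value. */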
786 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
787 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
788 }
789 VMCC_FOR_EACH_VMCPU_END(pVM);
790
791#else
792 RT_NOREF(pVM, idCpuCaller, enmReason);
793#endif
794}
795
796
797/**
798 * Flushes the prefetch buffer, light version.
799 */
800void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
801{
802#ifndef IEM_WITH_CODE_TLB
803 pVCpu->iem.s.cbOpcode = cbInstr;
804#else
805 RT_NOREF(pVCpu, cbInstr);
806#endif
807}
808
809
810/**
811 * Flushes the prefetch buffer, heavy version.
812 */
813void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
814{
815#ifndef IEM_WITH_CODE_TLB
816 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
817#elif 1
818 pVCpu->iem.s.cbInstrBufTotal = 0;
819 RT_NOREF(cbInstr);
820#else
821 RT_NOREF(pVCpu, cbInstr);
822#endif
823}
824
825
826
827#ifdef IEM_WITH_CODE_TLB
828
829/**
830 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
831 * failure and jumping.
832 *
833 * We end up here for a number of reasons:
834 * - pbInstrBuf isn't yet initialized.
835 * - Advancing beyond the buffer boundary (e.g. cross page).
836 * - Advancing beyond the CS segment limit.
837 * - Fetching from non-mappable page (e.g. MMIO).
838 *
839 * @param pVCpu The cross context virtual CPU structure of the
840 * calling thread.
841 * @param pvDst Where to return the bytes.
842 * @param cbDst Number of bytes to read. A value of zero is
843 * allowed for initializing pbInstrBuf (the
844 * recompiler does this). In this case it is best
845 * to set pbInstrBuf to NULL prior to the call.
846 */
847void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
848{
849# ifdef IN_RING3
850 for (;;)
851 {
852 Assert(cbDst <= 8);
853 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
854
855 /*
856 * We might have a partial buffer match, deal with that first to make the
857 * rest simpler. This is the first part of the cross page/buffer case.
858 */
859 if (pVCpu->iem.s.pbInstrBuf != NULL)
860 {
861 if (offBuf < pVCpu->iem.s.cbInstrBuf)
862 {
863 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
864 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
865 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
866
867 cbDst -= cbCopy;
868 pvDst = (uint8_t *)pvDst + cbCopy;
869 offBuf += cbCopy;
870 pVCpu->iem.s.offInstrNextByte += cbCopy;
871 }
872 }
873
874 /*
875 * Check segment limit, figuring how much we're allowed to access at this point.
876 *
877 * We will fault immediately if RIP is past the segment limit / in non-canonical
878 * territory. If we do continue, there are one or more bytes to read before we
879 * end up in trouble and we need to do that first before faulting.
880 */
881 RTGCPTR GCPtrFirst;
882 uint32_t cbMaxRead;
883 if (IEM_IS_64BIT_CODE(pVCpu))
884 {
885 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
886 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
887 { /* likely */ }
888 else
889 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
890 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
891 }
892 else
893 {
894 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
895 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
896 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
897 { /* likely */ }
898 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
899 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
900 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
901 if (cbMaxRead != 0)
902 { /* likely */ }
903 else
904 {
905 /* Overflowed because address is 0 and limit is max. */
906 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
907 cbMaxRead = X86_PAGE_SIZE;
908 }
909 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
910 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
911 if (cbMaxRead2 < cbMaxRead)
912 cbMaxRead = cbMaxRead2;
913 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
914 }
915
916 /*
917 * Get the TLB entry for this piece of code.
918 */
919 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
920 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
921 if (pTlbe->uTag == uTag)
922 {
923 /* likely when executing lots of code, otherwise unlikely */
924# ifdef VBOX_WITH_STATISTICS
925 pVCpu->iem.s.CodeTlb.cTlbHits++;
926# endif
927 }
928 else
929 {
930 pVCpu->iem.s.CodeTlb.cTlbMisses++;
931 PGMPTWALK Walk;
932 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
933 if (RT_FAILURE(rc))
934 {
935#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
936 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
937 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
938#endif
939 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
940 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
941 }
942
943 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
944 Assert(Walk.fSucceeded);
945 pTlbe->uTag = uTag;
946 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
947 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
948 pTlbe->GCPhys = Walk.GCPhys;
949 pTlbe->pbMappingR3 = NULL;
950 }
951
952 /*
953 * Check TLB page table level access flags.
954 */
955 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
956 {
957 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
958 {
959 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
960 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
961 }
962 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
963 {
964 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
965 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
966 }
967 }
968
969 /*
970 * Set the accessed flags.
971 * ASSUMES this is set when the address is translated rather than on commit...
972 */
973 /** @todo testcase: check when the A bit are actually set by the CPU for code. */
974 if (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED)
975 {
976 int rc2 = PGMGstModifyPage(pVCpu, GCPtrFirst, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
977 AssertRC(rc2);
978 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
979 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
980 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_F_PT_NO_ACCESSED;
981 }
982
983 /*
984 * Look up the physical page info if necessary.
985 */
986 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
987 { /* not necessary */ }
988 else
989 {
990 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
991 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
992 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
993 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
994 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
995 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
996 { /* likely */ }
997 else
998 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
999 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1000 | IEMTLBE_F_NO_MAPPINGR3
1001 | IEMTLBE_F_PG_NO_READ
1002 | IEMTLBE_F_PG_NO_WRITE
1003 | IEMTLBE_F_PG_UNASSIGNED
1004 | IEMTLBE_F_PG_CODE_PAGE);
1005 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1006 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1007 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1008 }
1009
1010# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1011 /*
1012 * Try do a direct read using the pbMappingR3 pointer.
1013 */
1014 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1015 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1016 {
1017 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1018 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1019 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1020 {
1021 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1022 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1023 }
1024 else
1025 {
1026 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1027 if (cbInstr + (uint32_t)cbDst <= 15)
1028 {
1029 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1030 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1031 }
1032 else
1033 {
1034 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1035 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1036 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1037 }
1038 }
1039 if (cbDst <= cbMaxRead)
1040 {
1041 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1042 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1043
1044 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1045 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1046 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1047 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1048 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1049 return;
1050 }
1051 pVCpu->iem.s.pbInstrBuf = NULL;
1052
1053 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1054 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1055 }
1056# else
1057# error "refactor as needed"
1058 /*
1059 * If there is no special read handling, we can read a bit more and
1060 * put it in the prefetch buffer.
1061 */
1062 if ( cbDst < cbMaxRead
1063 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1064 {
1065 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1066 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1067 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1068 { /* likely */ }
1069 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1070 {
1071 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1072 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1073 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1074 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1075 }
1076 else
1077 {
1078 Log((RT_SUCCESS(rcStrict)
1079 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1080 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1081 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1082 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1083 }
1084 }
1085# endif
1086 /*
1087 * Special read handling, so only read exactly what's needed.
1088 * This is a highly unlikely scenario.
1089 */
1090 else
1091 {
1092 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1093
1094 /* Check instruction length. */
1095 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1096 if (RT_LIKELY(cbInstr + cbDst <= 15))
1097 { /* likely */ }
1098 else
1099 {
1100 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1101 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1102 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1103 }
1104
1105 /* Do the reading. */
1106 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1107 if (cbToRead > 0)
1108 {
1109 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1110 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1111 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1112 { /* likely */ }
1113 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1114 {
1115 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1116 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1117 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1118 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1119 }
1120 else
1121 {
1122 Log((RT_SUCCESS(rcStrict)
1123 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1124 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1125 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1126 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1127 }
1128 }
1129
1130 /* Update the state and probably return. */
1131 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1132 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1133 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1134
1135 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1136 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1137 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1138 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1139 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1140 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1141 pVCpu->iem.s.pbInstrBuf = NULL;
1142 if (cbToRead == cbDst)
1143 return;
1144 }
1145
1146 /*
1147 * More to read, loop.
1148 */
1149 cbDst -= cbMaxRead;
1150 pvDst = (uint8_t *)pvDst + cbMaxRead;
1151 }
1152# else /* !IN_RING3 */
1153 RT_NOREF(pvDst, cbDst);
1154 if (pvDst || cbDst)
1155 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1156# endif /* !IN_RING3 */
1157}
1158
1159#else /* !IEM_WITH_CODE_TLB */
1160
1161/**
1162 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1163 * exception if it fails.
1164 *
1165 * @returns Strict VBox status code.
1166 * @param pVCpu The cross context virtual CPU structure of the
1167 * calling thread.
1168 * @param cbMin The minimum number of bytes relative to offOpcode
1169 * that must be read.
1170 */
1171VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1172{
1173 /*
1174 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1175 *
1176 * First translate CS:rIP to a physical address.
1177 */
1178 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1179 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1180 uint8_t const cbLeft = cbOpcode - offOpcode;
1181 Assert(cbLeft < cbMin);
1182 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1183
1184 uint32_t cbToTryRead;
1185 RTGCPTR GCPtrNext;
1186 if (IEM_IS_64BIT_CODE(pVCpu))
1187 {
1188 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1189 if (!IEM_IS_CANONICAL(GCPtrNext))
1190 return iemRaiseGeneralProtectionFault0(pVCpu);
1191 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1192 }
1193 else
1194 {
1195 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1196 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1197 GCPtrNext32 += cbOpcode;
1198 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1199 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1200 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1201 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1202 if (!cbToTryRead) /* overflowed */
1203 {
1204 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1205 cbToTryRead = UINT32_MAX;
1206 /** @todo check out wrapping around the code segment. */
1207 }
1208 if (cbToTryRead < cbMin - cbLeft)
1209 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1210 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1211
1212 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1213 if (cbToTryRead > cbLeftOnPage)
1214 cbToTryRead = cbLeftOnPage;
1215 }
1216
1217 /* Restrict to opcode buffer space.
1218
1219 We're making ASSUMPTIONS here based on work done previously in
1220 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1221 be fetched in case of an instruction crossing two pages. */
1222 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1223 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1224 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1225 { /* likely */ }
1226 else
1227 {
1228 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1229 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1230 return iemRaiseGeneralProtectionFault0(pVCpu);
1231 }
1232
1233 PGMPTWALK Walk;
1234 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1235 if (RT_FAILURE(rc))
1236 {
1237 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1238#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1239 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1240 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1241#endif
1242 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1243 }
1244 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1245 {
1246 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1247#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1248 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1249 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1250#endif
1251 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1252 }
1253 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1254 {
1255 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1256#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1257 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1258 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1259#endif
1260 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1261 }
1262 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1263 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1264 /** @todo Check reserved bits and such stuff. PGM is better at doing
1265 * that, so do it when implementing the guest virtual address
1266 * TLB... */
1267
1268 /*
1269 * Read the bytes at this address.
1270 *
1271 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1272 * and since PATM should only patch the start of an instruction there
1273 * should be no need to check again here.
1274 */
1275 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1276 {
1277 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1278 cbToTryRead, PGMACCESSORIGIN_IEM);
1279 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1280 { /* likely */ }
1281 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1282 {
1283 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1284 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1285 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1286 }
1287 else
1288 {
1289 Log((RT_SUCCESS(rcStrict)
1290 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1291 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1292 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1293 return rcStrict;
1294 }
1295 }
1296 else
1297 {
1298 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1299 if (RT_SUCCESS(rc))
1300 { /* likely */ }
1301 else
1302 {
1303 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1304 return rc;
1305 }
1306 }
1307 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1308 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1309
1310 return VINF_SUCCESS;
1311}
1312
1313#endif /* !IEM_WITH_CODE_TLB */
1314#ifndef IEM_WITH_SETJMP
1315
1316/**
1317 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1318 *
1319 * @returns Strict VBox status code.
1320 * @param pVCpu The cross context virtual CPU structure of the
1321 * calling thread.
1322 * @param pb Where to return the opcode byte.
1323 */
1324VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1325{
1326 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1327 if (rcStrict == VINF_SUCCESS)
1328 {
1329 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1330 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1331 pVCpu->iem.s.offOpcode = offOpcode + 1;
1332 }
1333 else
1334 *pb = 0;
1335 return rcStrict;
1336}
1337
1338#else /* IEM_WITH_SETJMP */
1339
1340/**
1341 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1342 *
1343 * @returns The opcode byte.
1344 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1345 */
1346uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1347{
1348# ifdef IEM_WITH_CODE_TLB
1349 uint8_t u8;
1350 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1351 return u8;
1352# else
1353 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1354 if (rcStrict == VINF_SUCCESS)
1355 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1356 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1357# endif
1358}
1359
1360#endif /* IEM_WITH_SETJMP */
1361
1362#ifndef IEM_WITH_SETJMP
1363
1364/**
1365 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1366 *
1367 * @returns Strict VBox status code.
1368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1369 * @param pu16 Where to return the opcode word.
1370 */
1371VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1372{
1373 uint8_t u8;
1374 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1375 if (rcStrict == VINF_SUCCESS)
1376 *pu16 = (int8_t)u8;
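 /* Note: the (int8_t) cast sign-extends the byte before it is stored in the
    wider unsigned type, e.g. 0xFE becomes 0xFFFE; the U32 and U64 variants
    below rely on the same trick. */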
1377 return rcStrict;
1378}
1379
1380
1381/**
1382 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1383 *
1384 * @returns Strict VBox status code.
1385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1386 * @param pu32 Where to return the opcode dword.
1387 */
1388VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1389{
1390 uint8_t u8;
1391 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1392 if (rcStrict == VINF_SUCCESS)
1393 *pu32 = (int8_t)u8;
1394 return rcStrict;
1395}
1396
1397
1398/**
1399 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1400 *
1401 * @returns Strict VBox status code.
1402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1403 * @param pu64 Where to return the opcode qword.
1404 */
1405VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1406{
1407 uint8_t u8;
1408 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1409 if (rcStrict == VINF_SUCCESS)
1410 *pu64 = (int8_t)u8;
1411 return rcStrict;
1412}
1413
1414#endif /* !IEM_WITH_SETJMP */
1415
1416
1417#ifndef IEM_WITH_SETJMP
1418
1419/**
1420 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1421 *
1422 * @returns Strict VBox status code.
1423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1424 * @param pu16 Where to return the opcode word.
1425 */
1426VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1427{
1428 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1429 if (rcStrict == VINF_SUCCESS)
1430 {
1431 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1432# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1433 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1434# else
1435 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1436# endif
1437 pVCpu->iem.s.offOpcode = offOpcode + 2;
1438 }
1439 else
1440 *pu16 = 0;
1441 return rcStrict;
1442}
1443
1444#else /* IEM_WITH_SETJMP */
1445
1446/**
1447 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1448 *
1449 * @returns The opcode word.
1450 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1451 */
1452uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1453{
1454# ifdef IEM_WITH_CODE_TLB
1455 uint16_t u16;
1456 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1457 return u16;
1458# else
1459 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1460 if (rcStrict == VINF_SUCCESS)
1461 {
1462 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1463 pVCpu->iem.s.offOpcode += 2;
1464# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1465 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1466# else
1467 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1468# endif
1469 }
1470 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1471# endif
1472}
1473
1474#endif /* IEM_WITH_SETJMP */
1475
1476#ifndef IEM_WITH_SETJMP
1477
1478/**
1479 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1480 *
1481 * @returns Strict VBox status code.
1482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1483 * @param pu32 Where to return the opcode double word.
1484 */
1485VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1486{
1487 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1488 if (rcStrict == VINF_SUCCESS)
1489 {
1490 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1491 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1492 pVCpu->iem.s.offOpcode = offOpcode + 2;
1493 }
1494 else
1495 *pu32 = 0;
1496 return rcStrict;
1497}
1498
1499
1500/**
1501 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1502 *
1503 * @returns Strict VBox status code.
1504 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1505 * @param pu64 Where to return the opcode quad word.
1506 */
1507VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1508{
1509 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1510 if (rcStrict == VINF_SUCCESS)
1511 {
1512 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1513 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1514 pVCpu->iem.s.offOpcode = offOpcode + 2;
1515 }
1516 else
1517 *pu64 = 0;
1518 return rcStrict;
1519}
1520
1521#endif /* !IEM_WITH_SETJMP */
1522
1523#ifndef IEM_WITH_SETJMP
1524
1525/**
1526 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1527 *
1528 * @returns Strict VBox status code.
1529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1530 * @param pu32 Where to return the opcode dword.
1531 */
1532VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1533{
1534 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1535 if (rcStrict == VINF_SUCCESS)
1536 {
1537 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1538# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1539 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1540# else
1541 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1542 pVCpu->iem.s.abOpcode[offOpcode + 1],
1543 pVCpu->iem.s.abOpcode[offOpcode + 2],
1544 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1545# endif
1546 pVCpu->iem.s.offOpcode = offOpcode + 4;
1547 }
1548 else
1549 *pu32 = 0;
1550 return rcStrict;
1551}
1552
1553#else /* IEM_WITH_SETJMP */
1554
1555/**
1556 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1557 *
1558 * @returns The opcode dword.
1559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1560 */
1561uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1562{
1563# ifdef IEM_WITH_CODE_TLB
1564 uint32_t u32;
1565 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1566 return u32;
1567# else
1568 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1569 if (rcStrict == VINF_SUCCESS)
1570 {
1571 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1572 pVCpu->iem.s.offOpcode = offOpcode + 4;
1573# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1574 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1575# else
1576 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1577 pVCpu->iem.s.abOpcode[offOpcode + 1],
1578 pVCpu->iem.s.abOpcode[offOpcode + 2],
1579 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1580# endif
1581 }
1582 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1583# endif
1584}
1585
1586#endif /* IEM_WITH_SETJMP */
1587
1588#ifndef IEM_WITH_SETJMP
1589
1590/**
1591 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1592 *
1593 * @returns Strict VBox status code.
1594 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1595 * @param pu64 Where to return the opcode dword, zero extended to a qword.
1596 */
1597VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1598{
1599 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1600 if (rcStrict == VINF_SUCCESS)
1601 {
1602 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1603 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1604 pVCpu->iem.s.abOpcode[offOpcode + 1],
1605 pVCpu->iem.s.abOpcode[offOpcode + 2],
1606 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1607 pVCpu->iem.s.offOpcode = offOpcode + 4;
1608 }
1609 else
1610 *pu64 = 0;
1611 return rcStrict;
1612}
1613
1614
1615/**
1616 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1617 *
1618 * @returns Strict VBox status code.
1619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1620 * @param pu64 Where to return the opcode qword.
1621 */
1622VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1623{
1624 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1625 if (rcStrict == VINF_SUCCESS)
1626 {
1627 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1628 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1629 pVCpu->iem.s.abOpcode[offOpcode + 1],
1630 pVCpu->iem.s.abOpcode[offOpcode + 2],
1631 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1632 pVCpu->iem.s.offOpcode = offOpcode + 4;
1633 }
1634 else
1635 *pu64 = 0;
1636 return rcStrict;
1637}
1638
1639#endif /* !IEM_WITH_SETJMP */
1640
1641#ifndef IEM_WITH_SETJMP
1642
1643/**
1644 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1645 *
1646 * @returns Strict VBox status code.
1647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1648 * @param pu64 Where to return the opcode qword.
1649 */
1650VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1651{
1652 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1653 if (rcStrict == VINF_SUCCESS)
1654 {
1655 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1656# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1657 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1658# else
1659 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1660 pVCpu->iem.s.abOpcode[offOpcode + 1],
1661 pVCpu->iem.s.abOpcode[offOpcode + 2],
1662 pVCpu->iem.s.abOpcode[offOpcode + 3],
1663 pVCpu->iem.s.abOpcode[offOpcode + 4],
1664 pVCpu->iem.s.abOpcode[offOpcode + 5],
1665 pVCpu->iem.s.abOpcode[offOpcode + 6],
1666 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1667# endif
1668 pVCpu->iem.s.offOpcode = offOpcode + 8;
1669 }
1670 else
1671 *pu64 = 0;
1672 return rcStrict;
1673}
1674
1675#else /* IEM_WITH_SETJMP */
1676
1677/**
1678 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1679 *
1680 * @returns The opcode qword.
1681 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1682 */
1683uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1684{
1685# ifdef IEM_WITH_CODE_TLB
1686 uint64_t u64;
1687 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1688 return u64;
1689# else
1690 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1691 if (rcStrict == VINF_SUCCESS)
1692 {
1693 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1694 pVCpu->iem.s.offOpcode = offOpcode + 8;
1695# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1696 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1697# else
1698 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1699 pVCpu->iem.s.abOpcode[offOpcode + 1],
1700 pVCpu->iem.s.abOpcode[offOpcode + 2],
1701 pVCpu->iem.s.abOpcode[offOpcode + 3],
1702 pVCpu->iem.s.abOpcode[offOpcode + 4],
1703 pVCpu->iem.s.abOpcode[offOpcode + 5],
1704 pVCpu->iem.s.abOpcode[offOpcode + 6],
1705 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1706# endif
1707 }
1708 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1709# endif
1710}
1711
1712#endif /* IEM_WITH_SETJMP */
1713
1714
1715
1716/** @name Misc Worker Functions.
1717 * @{
1718 */
1719
1720/**
1721 * Gets the exception class for the specified exception vector.
1722 *
1723 * @returns The class of the specified exception.
1724 * @param uVector The exception vector.
1725 */
1726static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1727{
1728 Assert(uVector <= X86_XCPT_LAST);
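    /* The benign / contributory / page-fault / double-fault classification returned here drives the #DF and
       triple fault decisions in IEMEvaluateRecursiveXcpt below. */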
1729 switch (uVector)
1730 {
1731 case X86_XCPT_DE:
1732 case X86_XCPT_TS:
1733 case X86_XCPT_NP:
1734 case X86_XCPT_SS:
1735 case X86_XCPT_GP:
1736 case X86_XCPT_SX: /* AMD only */
1737 return IEMXCPTCLASS_CONTRIBUTORY;
1738
1739 case X86_XCPT_PF:
1740 case X86_XCPT_VE: /* Intel only */
1741 return IEMXCPTCLASS_PAGE_FAULT;
1742
1743 case X86_XCPT_DF:
1744 return IEMXCPTCLASS_DOUBLE_FAULT;
1745 }
1746 return IEMXCPTCLASS_BENIGN;
1747}
1748
1749
1750/**
1751 * Evaluates how to handle an exception caused during delivery of another event
1752 * (exception / interrupt).
1753 *
1754 * @returns How to handle the recursive exception.
1755 * @param pVCpu The cross context virtual CPU structure of the
1756 * calling thread.
1757 * @param fPrevFlags The flags of the previous event.
1758 * @param uPrevVector The vector of the previous event.
1759 * @param fCurFlags The flags of the current exception.
1760 * @param uCurVector The vector of the current exception.
1761 * @param pfXcptRaiseInfo Where to store additional information about the
1762 * exception condition. Optional.
1763 */
1764VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1765 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1766{
1767 /*
1768 * Only CPU exceptions can be raised while delivering other events, software interrupt
1769 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1770 */
1771 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1772 Assert(pVCpu); RT_NOREF(pVCpu);
1773 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1774
1775 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1776 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
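    /* Example: a contributory fault (e.g. #GP) raised while delivering another contributory fault (e.g. #NP)
       yields a #DF, and a contributory or page fault raised during #DF delivery escalates to a triple fault. */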
1777 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1778 {
1779 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1780 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1781 {
1782 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1783 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1784 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1785 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1786 {
1787 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1788 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1789 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1790 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1791 uCurVector, pVCpu->cpum.GstCtx.cr2));
1792 }
1793 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1794 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1795 {
1796 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1797 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1798 }
1799 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1800 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1801 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1802 {
1803 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1804 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1805 }
1806 }
1807 else
1808 {
1809 if (uPrevVector == X86_XCPT_NMI)
1810 {
1811 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1812 if (uCurVector == X86_XCPT_PF)
1813 {
1814 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1815 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1816 }
1817 }
1818 else if ( uPrevVector == X86_XCPT_AC
1819 && uCurVector == X86_XCPT_AC)
1820 {
1821 enmRaise = IEMXCPTRAISE_CPU_HANG;
1822 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1823 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1824 }
1825 }
1826 }
1827 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1828 {
1829 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1830 if (uCurVector == X86_XCPT_PF)
1831 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1832 }
1833 else
1834 {
1835 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1836 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1837 }
1838
1839 if (pfXcptRaiseInfo)
1840 *pfXcptRaiseInfo = fRaiseInfo;
1841 return enmRaise;
1842}
1843
1844
1845/**
1846 * Enters the CPU shutdown state initiated by a triple fault or other
1847 * unrecoverable conditions.
1848 *
1849 * @returns Strict VBox status code.
1850 * @param pVCpu The cross context virtual CPU structure of the
1851 * calling thread.
1852 */
1853static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1854{
1855 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1856 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1857
1858 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1859 {
1860 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1861 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1862 }
1863
1864 RT_NOREF(pVCpu);
1865 return VINF_EM_TRIPLE_FAULT;
1866}
1867
1868
1869/**
1870 * Validates a new SS segment.
1871 *
1872 * @returns VBox strict status code.
1873 * @param pVCpu The cross context virtual CPU structure of the
1874 * calling thread.
1875 * @param NewSS The new SS selector.
1876 * @param uCpl The CPL to load the stack for.
1877 * @param pDesc Where to return the descriptor.
1878 */
1879static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1880{
1881 /* Null selectors are not allowed (we're not called for dispatching
1882 interrupts with SS=0 in long mode). */
1883 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1884 {
1885 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1886 return iemRaiseTaskSwitchFault0(pVCpu);
1887 }
1888
1889 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1890 if ((NewSS & X86_SEL_RPL) != uCpl)
1891 {
1892 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1893 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1894 }
1895
1896 /*
1897 * Read the descriptor.
1898 */
1899 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1900 if (rcStrict != VINF_SUCCESS)
1901 return rcStrict;
1902
1903 /*
1904 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1905 */
1906 if (!pDesc->Legacy.Gen.u1DescType)
1907 {
1908 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1909 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1910 }
1911
1912 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1913 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1914 {
1915 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1916 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1917 }
1918 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1919 {
1920 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1921 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1922 }
1923
1924 /* Is it there? */
1925 /** @todo testcase: Is this checked before the canonical / limit check below? */
1926 if (!pDesc->Legacy.Gen.u1Present)
1927 {
1928 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1929 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1930 }
1931
1932 return VINF_SUCCESS;
1933}
1934
1935/** @} */
1936
1937
1938/** @name Raising Exceptions.
1939 *
1940 * @{
1941 */
1942
1943
1944/**
1945 * Loads the specified stack far pointer from the TSS.
1946 *
1947 * @returns VBox strict status code.
1948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1949 * @param uCpl The CPL to load the stack for.
1950 * @param pSelSS Where to return the new stack segment.
1951 * @param puEsp Where to return the new stack pointer.
1952 */
1953static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1954{
1955 VBOXSTRICTRC rcStrict;
1956 Assert(uCpl < 4);
1957
1958 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1959 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1960 {
1961 /*
1962 * 16-bit TSS (X86TSS16).
1963 */
1964 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1965 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1966 {
1967 uint32_t off = uCpl * 4 + 2;
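            /* X86TSS16: the sp0/ss0 pair starts at offset 2 and each CPL slot is 4 bytes (sp, then ss),
               so the single 32-bit read below yields both values. */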
1968 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1969 {
1970 /** @todo check actual access pattern here. */
1971 uint32_t u32Tmp = 0; /* gcc maybe... */
1972 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1973 if (rcStrict == VINF_SUCCESS)
1974 {
1975 *puEsp = RT_LOWORD(u32Tmp);
1976 *pSelSS = RT_HIWORD(u32Tmp);
1977 return VINF_SUCCESS;
1978 }
1979 }
1980 else
1981 {
1982 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1983 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1984 }
1985 break;
1986 }
1987
1988 /*
1989 * 32-bit TSS (X86TSS32).
1990 */
1991 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1992 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1993 {
1994 uint32_t off = uCpl * 8 + 4;
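            /* X86TSS32: the esp0/ss0 pair starts at offset 4 and each CPL slot is 8 bytes (esp, then ss),
               so the single 64-bit read below yields both values. */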
1995 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1996 {
1997/** @todo check actual access pattern here. */
1998 uint64_t u64Tmp;
1999 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2000 if (rcStrict == VINF_SUCCESS)
2001 {
2002 *puEsp = u64Tmp & UINT32_MAX;
2003 *pSelSS = (RTSEL)(u64Tmp >> 32);
2004 return VINF_SUCCESS;
2005 }
2006 }
2007 else
2008 {
2009                 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2010 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2011 }
2012 break;
2013 }
2014
2015 default:
2016 AssertFailed();
2017 rcStrict = VERR_IEM_IPE_4;
2018 break;
2019 }
2020
2021 *puEsp = 0; /* make gcc happy */
2022 *pSelSS = 0; /* make gcc happy */
2023 return rcStrict;
2024}
2025
2026
2027/**
2028 * Loads the specified stack pointer from the 64-bit TSS.
2029 *
2030 * @returns VBox strict status code.
2031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2032 * @param uCpl The CPL to load the stack for.
2033 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2034 * @param puRsp Where to return the new stack pointer.
2035 */
2036static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2037{
2038 Assert(uCpl < 4);
2039 Assert(uIst < 8);
2040 *puRsp = 0; /* make gcc happy */
2041
2042 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2043 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2044
2045 uint32_t off;
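    /* RSP0..RSP2 and IST1..IST7 each form a run of consecutive 64-bit fields in the 64-bit TSS;
       RT_UOFFSETOF supplies the base offsets. */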
2046 if (uIst)
2047 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2048 else
2049 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2050 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2051 {
2052 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2053 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2054 }
2055
2056 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2057}
2058
2059
2060/**
2061 * Adjust the CPU state according to the exception being raised.
2062 *
2063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2064 * @param u8Vector The exception that has been raised.
2065 */
2066DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2067{
2068 switch (u8Vector)
2069 {
2070 case X86_XCPT_DB:
2071 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2072 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2073 break;
2074 /** @todo Read the AMD and Intel exception reference... */
2075 }
2076}
2077
2078
2079/**
2080 * Implements exceptions and interrupts for real mode.
2081 *
2082 * @returns VBox strict status code.
2083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2084 * @param cbInstr The number of bytes to offset rIP by in the return
2085 * address.
2086 * @param u8Vector The interrupt / exception vector number.
2087 * @param fFlags The flags.
2088 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2089 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2090 */
2091static VBOXSTRICTRC
2092iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2093 uint8_t cbInstr,
2094 uint8_t u8Vector,
2095 uint32_t fFlags,
2096 uint16_t uErr,
2097 uint64_t uCr2) RT_NOEXCEPT
2098{
2099 NOREF(uErr); NOREF(uCr2);
2100 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2101
2102 /*
2103 * Read the IDT entry.
2104 */
2105 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2106 {
2107 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2108 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2109 }
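    /* A real-mode IVT entry is 4 bytes: a 16-bit offset followed by a 16-bit segment. The IDTR base is
       honoured here, so tables relocated via LIDT work as well. */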
2110 RTFAR16 Idte;
2111 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2112 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2113 {
2114 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2115 return rcStrict;
2116 }
2117
2118 /*
2119 * Push the stack frame.
2120 */
2121 uint8_t bUnmapInfo;
2122 uint16_t *pu16Frame;
2123 uint64_t uNewRsp;
2124 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2125 if (rcStrict != VINF_SUCCESS)
2126 return rcStrict;
2127
2128 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2129#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2130 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
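    /* 8086/80186 class CPUs (including the NEC V20/V30) read FLAGS bits 12..15 as all set. */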
2131 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2132 fEfl |= UINT16_C(0xf000);
2133#endif
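    /* Build the 6-byte real-mode exception frame: IP at the lowest address, then CS, then FLAGS;
       this is exactly what IRET will pop. */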
2134 pu16Frame[2] = (uint16_t)fEfl;
2135 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2136 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2137 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2138 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2139 return rcStrict;
2140
2141 /*
2142 * Load the vector address into cs:ip and make exception specific state
2143 * adjustments.
2144 */
2145 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2146 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2147 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2148 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2149 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2150 pVCpu->cpum.GstCtx.rip = Idte.off;
2151 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2152 IEMMISC_SET_EFL(pVCpu, fEfl);
2153
2154 /** @todo do we actually do this in real mode? */
2155 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2156 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2157
2158    /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2159 so best leave them alone in case we're in a weird kind of real mode... */
2160
2161 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2162}
2163
2164
2165/**
2166 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2167 *
2168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2169 * @param pSReg Pointer to the segment register.
2170 */
2171DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2172{
2173 pSReg->Sel = 0;
2174 pSReg->ValidSel = 0;
2175 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2176 {
2177 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2178 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2179 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2180 }
2181 else
2182 {
2183 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2184 /** @todo check this on AMD-V */
2185 pSReg->u64Base = 0;
2186 pSReg->u32Limit = 0;
2187 }
2188}
2189
2190
2191/**
2192 * Loads a segment selector during a task switch in V8086 mode.
2193 *
2194 * @param pSReg Pointer to the segment register.
2195 * @param uSel The selector value to load.
2196 */
2197DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2198{
2199 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2200 pSReg->Sel = uSel;
2201 pSReg->ValidSel = uSel;
2202 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2203 pSReg->u64Base = uSel << 4;
2204 pSReg->u32Limit = 0xffff;
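    /* 0xf3 = present, DPL=3, read/write accessed data segment, i.e. the fixed V86-mode segment attributes. */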
2205 pSReg->Attr.u = 0xf3;
2206}
2207
2208
2209/**
2210 * Loads a segment selector during a task switch in protected mode.
2211 *
2212 * In this task switch scenario, we would throw \#TS exceptions rather than
2213 * \#GPs.
2214 *
2215 * @returns VBox strict status code.
2216 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2217 * @param pSReg Pointer to the segment register.
2218 * @param uSel The new selector value.
2219 *
2220 * @remarks This does _not_ handle CS or SS.
2221 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2222 */
2223static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2224{
2225 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2226
2227 /* Null data selector. */
2228 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2229 {
2230 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2231 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2232 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2233 return VINF_SUCCESS;
2234 }
2235
2236 /* Fetch the descriptor. */
2237 IEMSELDESC Desc;
2238 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2239 if (rcStrict != VINF_SUCCESS)
2240 {
2241 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2242 VBOXSTRICTRC_VAL(rcStrict)));
2243 return rcStrict;
2244 }
2245
2246 /* Must be a data segment or readable code segment. */
2247 if ( !Desc.Legacy.Gen.u1DescType
2248 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2249 {
2250 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2251 Desc.Legacy.Gen.u4Type));
2252 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2253 }
2254
2255 /* Check privileges for data segments and non-conforming code segments. */
2256 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2257 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2258 {
2259 /* The RPL and the new CPL must be less than or equal to the DPL. */
2260 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2261 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2262 {
2263 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2264 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2265 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2266 }
2267 }
2268
2269 /* Is it there? */
2270 if (!Desc.Legacy.Gen.u1Present)
2271 {
2272 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2273 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2274 }
2275
2276 /* The base and limit. */
2277 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2278 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2279
2280 /*
2281 * Ok, everything checked out fine. Now set the accessed bit before
2282 * committing the result into the registers.
2283 */
2284 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2285 {
2286 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2287 if (rcStrict != VINF_SUCCESS)
2288 return rcStrict;
2289 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2290 }
2291
2292 /* Commit */
2293 pSReg->Sel = uSel;
2294 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2295 pSReg->u32Limit = cbLimit;
2296 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2297 pSReg->ValidSel = uSel;
2298 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2299 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2300 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2301
2302 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2303 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2304 return VINF_SUCCESS;
2305}
2306
2307
2308/**
2309 * Performs a task switch.
2310 *
2311 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2312 * caller is responsible for performing the necessary checks (like DPL, TSS
2313 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2314 * reference for JMP, CALL, IRET.
2315 *
2316 * If the task switch is due to a software interrupt or hardware exception,
2317 * the caller is responsible for validating the TSS selector and descriptor. See
2318 * Intel Instruction reference for INT n.
2319 *
2320 * @returns VBox strict status code.
2321 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2322 * @param enmTaskSwitch The cause of the task switch.
2323 * @param uNextEip The EIP effective after the task switch.
2324 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2325 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2326 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2327 * @param SelTss The TSS selector of the new task.
2328 * @param pNewDescTss Pointer to the new TSS descriptor.
2329 */
2330VBOXSTRICTRC
2331iemTaskSwitch(PVMCPUCC pVCpu,
2332 IEMTASKSWITCH enmTaskSwitch,
2333 uint32_t uNextEip,
2334 uint32_t fFlags,
2335 uint16_t uErr,
2336 uint64_t uCr2,
2337 RTSEL SelTss,
2338 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2339{
2340 Assert(!IEM_IS_REAL_MODE(pVCpu));
2341 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2342 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2343
2344 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2345 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2346 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2347 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2348 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2349
2350 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2351 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2352
2353 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2354 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2355
2356 /* Update CR2 in case it's a page-fault. */
2357 /** @todo This should probably be done much earlier in IEM/PGM. See
2358 * @bugref{5653#c49}. */
2359 if (fFlags & IEM_XCPT_FLAGS_CR2)
2360 pVCpu->cpum.GstCtx.cr2 = uCr2;
2361
2362 /*
2363 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2364 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2365 */
2366 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2367 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2368 if (uNewTssLimit < uNewTssLimitMin)
2369 {
2370 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2371 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2372 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2373 }
2374
2375 /*
2376     * Task switches in VMX non-root mode always cause task-switch VM-exits.
2377 * The new TSS must have been read and validated (DPL, limits etc.) before a
2378 * task-switch VM-exit commences.
2379 *
2380 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2381 */
2382 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2383 {
2384 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2385 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2386 }
2387
2388 /*
2389 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2390 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2391 */
2392 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2393 {
2394 uint32_t const uExitInfo1 = SelTss;
2395 uint32_t uExitInfo2 = uErr;
2396 switch (enmTaskSwitch)
2397 {
2398 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2399 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2400 default: break;
2401 }
2402 if (fFlags & IEM_XCPT_FLAGS_ERR)
2403 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2404 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2405 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2406
2407 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2408 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2409 RT_NOREF2(uExitInfo1, uExitInfo2);
2410 }
2411
2412 /*
2413 * Check the current TSS limit. The last written byte to the current TSS during the
2414 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2415 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2416 *
2417     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2418 * end up with smaller than "legal" TSS limits.
2419 */
2420 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2421 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2422 if (uCurTssLimit < uCurTssLimitMin)
2423 {
2424 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
2425 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
2426 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2427 }
2428
2429 /*
2430 * Verify that the new TSS can be accessed and map it. Map only the required contents
2431 * and not the entire TSS.
2432 */
2433 uint8_t bUnmapInfoNewTss;
2434 void *pvNewTss;
2435 uint32_t const cbNewTss = uNewTssLimitMin + 1;
2436 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
2437 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2438 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2439 * not perform correct translation if this happens. See Intel spec. 7.2.1
2440 * "Task-State Segment". */
2441 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
2442/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
2443 * Consider wrapping the remainder into a function for simpler cleanup. */
2444 if (rcStrict != VINF_SUCCESS)
2445 {
2446 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
2447 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
2448 return rcStrict;
2449 }
2450
2451 /*
2452 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2453 */
2454 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2455 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2456 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2457 {
2458 uint8_t bUnmapInfoDescCurTss;
2459 PX86DESC pDescCurTss;
2460 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
2461 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2462 if (rcStrict != VINF_SUCCESS)
2463 {
2464 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2465 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2466 return rcStrict;
2467 }
2468
2469 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2470 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
2471 if (rcStrict != VINF_SUCCESS)
2472 {
2473 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2474 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2475 return rcStrict;
2476 }
2477
2478 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2479 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2480 {
2481 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2482 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2483 fEFlags &= ~X86_EFL_NT;
2484 }
2485 }
2486
2487 /*
2488 * Save the CPU state into the current TSS.
2489 */
2490 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
2491 if (GCPtrNewTss == GCPtrCurTss)
2492 {
2493 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
2494 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2495 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2496 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2497 pVCpu->cpum.GstCtx.ldtr.Sel));
2498 }
2499 if (fIsNewTss386)
2500 {
2501 /*
2502 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2503 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2504 */
2505 uint8_t bUnmapInfoCurTss32;
2506 void *pvCurTss32;
2507 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
2508 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2509 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2510 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
2511 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2512 if (rcStrict != VINF_SUCCESS)
2513 {
2514 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2515 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2516 return rcStrict;
2517 }
2518
2519         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
2520 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
2521 pCurTss32->eip = uNextEip;
2522 pCurTss32->eflags = fEFlags;
2523 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
2524 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
2525 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
2526 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
2527 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
2528 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
2529 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
2530 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
2531 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
2532 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2533 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2534 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2535 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2536 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2537
2538 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
2539 if (rcStrict != VINF_SUCCESS)
2540 {
2541 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2542 VBOXSTRICTRC_VAL(rcStrict)));
2543 return rcStrict;
2544 }
2545 }
2546 else
2547 {
2548 /*
2549 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2550 */
2551 uint8_t bUnmapInfoCurTss16;
2552 void *pvCurTss16;
2553 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
2554 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2555 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2556 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
2557 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2558 if (rcStrict != VINF_SUCCESS)
2559 {
2560 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2561 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2562 return rcStrict;
2563 }
2564
2565         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
2566 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
2567 pCurTss16->ip = uNextEip;
2568 pCurTss16->flags = (uint16_t)fEFlags;
2569 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
2570 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
2571 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
2572 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
2573 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
2574 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
2575 pCurTss16->si = pVCpu->cpum.GstCtx.si;
2576 pCurTss16->di = pVCpu->cpum.GstCtx.di;
2577 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
2578 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2579 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2580 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2581
2582 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
2583 if (rcStrict != VINF_SUCCESS)
2584 {
2585 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2586 VBOXSTRICTRC_VAL(rcStrict)));
2587 return rcStrict;
2588 }
2589 }
2590
2591 /*
2592 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2593 */
2594 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2595 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2596 {
2597 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2598 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
2599 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2600 }
2601
2602 /*
2603 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2604 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2605 */
2606 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2607 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2608 bool fNewDebugTrap;
2609 if (fIsNewTss386)
2610 {
2611 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
2612 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
2613 uNewEip = pNewTss32->eip;
2614 uNewEflags = pNewTss32->eflags;
2615 uNewEax = pNewTss32->eax;
2616 uNewEcx = pNewTss32->ecx;
2617 uNewEdx = pNewTss32->edx;
2618 uNewEbx = pNewTss32->ebx;
2619 uNewEsp = pNewTss32->esp;
2620 uNewEbp = pNewTss32->ebp;
2621 uNewEsi = pNewTss32->esi;
2622 uNewEdi = pNewTss32->edi;
2623 uNewES = pNewTss32->es;
2624 uNewCS = pNewTss32->cs;
2625 uNewSS = pNewTss32->ss;
2626 uNewDS = pNewTss32->ds;
2627 uNewFS = pNewTss32->fs;
2628 uNewGS = pNewTss32->gs;
2629 uNewLdt = pNewTss32->selLdt;
2630 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
2631 }
2632 else
2633 {
2634 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
2635 uNewCr3 = 0;
2636 uNewEip = pNewTss16->ip;
2637 uNewEflags = pNewTss16->flags;
2638 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
2639 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
2640 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
2641 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
2642 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
2643 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
2644 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
2645 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
2646 uNewES = pNewTss16->es;
2647 uNewCS = pNewTss16->cs;
2648 uNewSS = pNewTss16->ss;
2649 uNewDS = pNewTss16->ds;
2650 uNewFS = 0;
2651 uNewGS = 0;
2652 uNewLdt = pNewTss16->selLdt;
2653 fNewDebugTrap = false;
2654 }
2655
2656 if (GCPtrNewTss == GCPtrCurTss)
2657 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2658 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2659
2660 /*
2661 * We're done accessing the new TSS.
2662 */
2663 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2664 if (rcStrict != VINF_SUCCESS)
2665 {
2666 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2667 return rcStrict;
2668 }
2669
2670 /*
2671 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2672 */
2673 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2674 {
2675 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
2676 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2677 if (rcStrict != VINF_SUCCESS)
2678 {
2679 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2680 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2681 return rcStrict;
2682 }
2683
2684 /* Check that the descriptor indicates the new TSS is available (not busy). */
2685 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2686 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2687 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
2688
2689 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2690 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2691 if (rcStrict != VINF_SUCCESS)
2692 {
2693 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2694 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2695 return rcStrict;
2696 }
2697 }
2698
2699 /*
2700     * From this point on, we're technically in the new task. Exceptions raised from here on are delivered
2701     * in the context of the new task, i.e. after the task switch completes but before the first instruction of the new task executes.
2702 */
2703 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
2704 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
2705 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2706 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
2707 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
2708 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
2709 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2710
2711 /* Set the busy bit in TR. */
2712 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2713
2714 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2715 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2716 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2717 {
2718 uNewEflags |= X86_EFL_NT;
2719 }
2720
2721 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2722 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2723 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2724
2725 pVCpu->cpum.GstCtx.eip = uNewEip;
2726 pVCpu->cpum.GstCtx.eax = uNewEax;
2727 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2728 pVCpu->cpum.GstCtx.edx = uNewEdx;
2729 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2730 pVCpu->cpum.GstCtx.esp = uNewEsp;
2731 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2732 pVCpu->cpum.GstCtx.esi = uNewEsi;
2733 pVCpu->cpum.GstCtx.edi = uNewEdi;
2734
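    /* Keep only the architecturally defined EFLAGS bits from the TSS image and force bit 1, which always reads as one. */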
2735 uNewEflags &= X86_EFL_LIVE_MASK;
2736 uNewEflags |= X86_EFL_RA1_MASK;
2737 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2738
2739 /*
2740 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2741 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2742 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2743 */
2744 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2745 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2746
2747 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2748 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2749
2750 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2751 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2752
2753 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2754 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2755
2756 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2757 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2758
2759 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2760 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2761 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2762
2763 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2764 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2765 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2766 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2767
2768 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2769 {
2770 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2771 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2772 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2773 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2774 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2775 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2776 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2777 }
2778
2779 /*
2780 * Switch CR3 for the new task.
2781 */
2782 if ( fIsNewTss386
2783 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2784 {
2785 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2786 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2787 AssertRCSuccessReturn(rc, rc);
2788
2789 /* Inform PGM. */
2790 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2791 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2792 AssertRCReturn(rc, rc);
2793 /* ignore informational status codes */
2794
2795 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2796 }
2797
2798 /*
2799 * Switch LDTR for the new task.
2800 */
2801 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2802 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2803 else
2804 {
2805 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2806
2807 IEMSELDESC DescNewLdt;
2808 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2809 if (rcStrict != VINF_SUCCESS)
2810 {
2811 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2812 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2813 return rcStrict;
2814 }
2815 if ( !DescNewLdt.Legacy.Gen.u1Present
2816 || DescNewLdt.Legacy.Gen.u1DescType
2817 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2818 {
2819 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2820 uNewLdt, DescNewLdt.Legacy.u));
2821 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2822 }
2823
2824 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2825 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2826 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2827 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2828 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2829 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2830 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2831 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2832 }
2833
2834 IEMSELDESC DescSS;
2835 if (IEM_IS_V86_MODE(pVCpu))
2836 {
2837 IEM_SET_CPL(pVCpu, 3);
2838 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2839 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2840 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2841 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2842 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2843 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2844
2845 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2846 DescSS.Legacy.u = 0;
2847 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2848 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2849 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2850 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2851 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2852 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2853 DescSS.Legacy.Gen.u2Dpl = 3;
2854 }
2855 else
2856 {
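        /* The CPL of the incoming task is taken from the RPL of the CS selector stored in the new TSS. */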
2857 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2858
2859 /*
2860 * Load the stack segment for the new task.
2861 */
2862 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2863 {
2864 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2865 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2866 }
2867
2868 /* Fetch the descriptor. */
2869 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2870 if (rcStrict != VINF_SUCCESS)
2871 {
2872 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2873 VBOXSTRICTRC_VAL(rcStrict)));
2874 return rcStrict;
2875 }
2876
2877 /* SS must be a data segment and writable. */
2878 if ( !DescSS.Legacy.Gen.u1DescType
2879 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2880 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2881 {
2882 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2883 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2884 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2885 }
2886
2887 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2888 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2889 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2890 {
2891 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2892 uNewCpl));
2893 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2894 }
2895
2896 /* Is it there? */
2897 if (!DescSS.Legacy.Gen.u1Present)
2898 {
2899 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2900 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2901 }
2902
2903 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2904 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2905
2906 /* Set the accessed bit before committing the result into SS. */
2907 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2908 {
2909 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2910 if (rcStrict != VINF_SUCCESS)
2911 return rcStrict;
2912 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2913 }
2914
2915 /* Commit SS. */
2916 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2917 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2918 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2919 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2920 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2921 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2922 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2923
2924 /* CPL has changed, update IEM before loading rest of segments. */
2925 IEM_SET_CPL(pVCpu, uNewCpl);
2926
2927 /*
2928 * Load the data segments for the new task.
2929 */
2930 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2931 if (rcStrict != VINF_SUCCESS)
2932 return rcStrict;
2933 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2934 if (rcStrict != VINF_SUCCESS)
2935 return rcStrict;
2936 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2937 if (rcStrict != VINF_SUCCESS)
2938 return rcStrict;
2939 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2940 if (rcStrict != VINF_SUCCESS)
2941 return rcStrict;
2942
2943 /*
2944 * Load the code segment for the new task.
2945 */
2946 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2947 {
2948 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2949 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2950 }
2951
2952 /* Fetch the descriptor. */
2953 IEMSELDESC DescCS;
2954 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2955 if (rcStrict != VINF_SUCCESS)
2956 {
2957 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2958 return rcStrict;
2959 }
2960
2961 /* CS must be a code segment. */
2962 if ( !DescCS.Legacy.Gen.u1DescType
2963 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2964 {
2965 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2966 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2967 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2968 }
2969
2970 /* For conforming CS, DPL must be less than or equal to the RPL. */
2971 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2972 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2973 {
2974             Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2975 DescCS.Legacy.Gen.u2Dpl));
2976 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2977 }
2978
2979 /* For non-conforming CS, DPL must match RPL. */
2980 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2981 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2982 {
2983             Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2984 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2985 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2986 }
2987
2988 /* Is it there? */
2989 if (!DescCS.Legacy.Gen.u1Present)
2990 {
2991 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2992 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2993 }
2994
2995 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2996 u64Base = X86DESC_BASE(&DescCS.Legacy);
2997
2998 /* Set the accessed bit before committing the result into CS. */
2999 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3000 {
3001 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
3002 if (rcStrict != VINF_SUCCESS)
3003 return rcStrict;
3004 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3005 }
3006
3007 /* Commit CS. */
3008 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3009 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
3010 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3011 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
3012 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3013 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3014 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3015 }
3016
3017 /* Make sure the CPU mode is correct. */
3018 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3019 if (fExecNew != pVCpu->iem.s.fExec)
3020 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3021 pVCpu->iem.s.fExec = fExecNew;
3022
3023 /** @todo Debug trap. */
3024 if (fIsNewTss386 && fNewDebugTrap)
3025 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3026
3027 /*
3028 * Construct the error code masks based on what caused this task switch.
3029 * See Intel Instruction reference for INT.
3030 */
3031 uint16_t uExt;
3032 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3033 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3034 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3035 uExt = 1;
3036 else
3037 uExt = 0;
3038
3039 /*
3040 * Push any error code on to the new stack.
3041 */
3042 if (fFlags & IEM_XCPT_FLAGS_ERR)
3043 {
3044 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3045 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3046 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
3047
3048 /* Check that there is sufficient space on the stack. */
3049 /** @todo Factor out segment limit checking for normal/expand down segments
3050 * into a separate function. */
3051 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3052 {
3053 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3054 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3055 {
3056 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3057 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3058 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3059 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3060 }
3061 }
3062 else
3063 {
3064 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3065 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3066 {
3067 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3068 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3069 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3070 }
3071 }
3072
3073
3074 if (fIsNewTss386)
3075 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3076 else
3077 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3078 if (rcStrict != VINF_SUCCESS)
3079 {
3080 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3081 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3082 return rcStrict;
3083 }
3084 }
3085
3086 /* Check the new EIP against the new CS limit. */
3087 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3088 {
3089        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3090 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3091 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3092 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3093 }
3094
3095 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3096 pVCpu->cpum.GstCtx.ss.Sel));
3097 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3098}
3099
3100
3101/**
3102 * Implements exceptions and interrupts for protected mode.
3103 *
3104 * @returns VBox strict status code.
3105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3106 * @param cbInstr The number of bytes to offset rIP by in the return
3107 * address.
3108 * @param u8Vector The interrupt / exception vector number.
3109 * @param fFlags The flags.
3110 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3111 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3112 */
3113static VBOXSTRICTRC
3114iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3115 uint8_t cbInstr,
3116 uint8_t u8Vector,
3117 uint32_t fFlags,
3118 uint16_t uErr,
3119 uint64_t uCr2) RT_NOEXCEPT
3120{
3121 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3122
3123 /*
3124 * Read the IDT entry.
3125 */
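        /* Protected-mode IDT entries are 8 bytes each, hence the 8 * u8Vector scaling below;
           IDT-sourced error codes set the IDT bit and carry the vector in the selector-index field. */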
3126 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3127 {
3128 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3129 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3130 }
3131 X86DESC Idte;
3132 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3133 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3134 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3135 {
3136 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3137 return rcStrict;
3138 }
3139 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3140 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3141 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3142 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3143
3144 /*
3145 * Check the descriptor type, DPL and such.
3146 * ASSUMES this is done in the same order as described for call-gate calls.
3147 */
3148 if (Idte.Gate.u1DescType)
3149 {
3150 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3151 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3152 }
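        /* Classify the gate. Note that f32BitGate doubles as the shift count that scales the
           16-bit stack-frame sizes up for 32-bit gates further down, and fEflToClear collects
           the EFLAGS bits to clear for the handler (interrupt gates additionally clear IF). */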
3153 bool fTaskGate = false;
3154 uint8_t f32BitGate = true;
3155 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3156 switch (Idte.Gate.u4Type)
3157 {
3158 case X86_SEL_TYPE_SYS_UNDEFINED:
3159 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3160 case X86_SEL_TYPE_SYS_LDT:
3161 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3162 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3163 case X86_SEL_TYPE_SYS_UNDEFINED2:
3164 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3165 case X86_SEL_TYPE_SYS_UNDEFINED3:
3166 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3167 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3168 case X86_SEL_TYPE_SYS_UNDEFINED4:
3169 {
3170 /** @todo check what actually happens when the type is wrong...
3171 * esp. call gates. */
3172 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3173 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3174 }
3175
3176 case X86_SEL_TYPE_SYS_286_INT_GATE:
3177 f32BitGate = false;
3178 RT_FALL_THRU();
3179 case X86_SEL_TYPE_SYS_386_INT_GATE:
3180 fEflToClear |= X86_EFL_IF;
3181 break;
3182
3183 case X86_SEL_TYPE_SYS_TASK_GATE:
3184 fTaskGate = true;
3185#ifndef IEM_IMPLEMENTS_TASKSWITCH
3186 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3187#endif
3188 break;
3189
3190 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3191            f32BitGate = false;
                RT_FALL_THRU();
3192 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3193 break;
3194
3195 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3196 }
3197
3198 /* Check DPL against CPL if applicable. */
3199 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3200 {
3201 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3202 {
3203 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3204 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3205 }
3206 }
3207
3208 /* Is it there? */
3209 if (!Idte.Gate.u1Present)
3210 {
3211 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3212 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3213 }
3214
3215 /* Is it a task-gate? */
3216 if (fTaskGate)
3217 {
3218 /*
3219 * Construct the error code masks based on what caused this task switch.
3220 * See Intel Instruction reference for INT.
3221 */
3222 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3223 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3224 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3225 RTSEL SelTss = Idte.Gate.u16Sel;
3226
3227 /*
3228 * Fetch the TSS descriptor in the GDT.
3229 */
3230 IEMSELDESC DescTSS;
3231 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3232 if (rcStrict != VINF_SUCCESS)
3233 {
3234 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3235 VBOXSTRICTRC_VAL(rcStrict)));
3236 return rcStrict;
3237 }
3238
3239 /* The TSS descriptor must be a system segment and be available (not busy). */
3240 if ( DescTSS.Legacy.Gen.u1DescType
3241 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3242 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3243 {
3244 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3245 u8Vector, SelTss, DescTSS.Legacy.au64));
3246 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3247 }
3248
3249 /* The TSS must be present. */
3250 if (!DescTSS.Legacy.Gen.u1Present)
3251 {
3252 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3253 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3254 }
3255
3256 /* Do the actual task switch. */
3257 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3258 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3259 fFlags, uErr, uCr2, SelTss, &DescTSS);
3260 }
3261
3262 /* A null CS is bad. */
3263 RTSEL NewCS = Idte.Gate.u16Sel;
3264 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3265 {
3266 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3267 return iemRaiseGeneralProtectionFault0(pVCpu);
3268 }
3269
3270 /* Fetch the descriptor for the new CS. */
3271 IEMSELDESC DescCS;
3272 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3273 if (rcStrict != VINF_SUCCESS)
3274 {
3275 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3276 return rcStrict;
3277 }
3278
3279 /* Must be a code segment. */
3280 if (!DescCS.Legacy.Gen.u1DescType)
3281 {
3282 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3283 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3284 }
3285 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3286 {
3287 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3288 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3289 }
3290
3291 /* Don't allow lowering the privilege level. */
3292 /** @todo Does the lowering of privileges apply to software interrupts
3293 * only? This has bearings on the more-privileged or
3294 * same-privilege stack behavior further down. A testcase would
3295 * be nice. */
3296 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3297 {
3298 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3299 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3300 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3301 }
3302
3303 /* Make sure the selector is present. */
3304 if (!DescCS.Legacy.Gen.u1Present)
3305 {
3306 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3307 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3308 }
3309
3310#ifdef LOG_ENABLED
3311 /* If software interrupt, try decode it if logging is enabled and such. */
3312 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3313 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3314 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3315#endif
3316
3317 /* Check the new EIP against the new CS limit. */
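        /* 286 gates only carry a 16-bit offset; 386 gates combine the low and high offset words. */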
3318 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3319 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3320 ? Idte.Gate.u16OffsetLow
3321 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3322 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3323 if (uNewEip > cbLimitCS)
3324 {
3325 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3326 u8Vector, uNewEip, cbLimitCS, NewCS));
3327 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3328 }
3329 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3330
3331 /* Calc the flag image to push. */
3332 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3333 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3334 fEfl &= ~X86_EFL_RF;
3335 else
3336 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3337
3338 /* From V8086 mode only go to CPL 0. */
3339 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3340 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3341 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3342 {
3343 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3344 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3345 }
3346
3347 /*
3348 * If the privilege level changes, we need to get a new stack from the TSS.
3349 * This in turns means validating the new SS and ESP...
3350 */
3351 if (uNewCpl != IEM_GET_CPL(pVCpu))
3352 {
3353 RTSEL NewSS;
3354 uint32_t uNewEsp;
3355 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3356 if (rcStrict != VINF_SUCCESS)
3357 return rcStrict;
3358
3359 IEMSELDESC DescSS;
3360 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3361 if (rcStrict != VINF_SUCCESS)
3362 return rcStrict;
3363 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3364 if (!DescSS.Legacy.Gen.u1DefBig)
3365 {
3366 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3367 uNewEsp = (uint16_t)uNewEsp;
3368 }
3369
3370 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3371
3372 /* Check that there is sufficient space for the stack frame. */
3373 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
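            /* Frame size: SS:ESP, EFLAGS and CS:EIP take 10 bytes as 16-bit words (12 with an
               error code); the V8086 frame additionally holds GS, FS, DS and ES (18/20 bytes).
               The f32BitGate shift doubles these for 32-bit gates. */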
3374 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3375 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3376 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3377
3378 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3379 {
3380 if ( uNewEsp - 1 > cbLimitSS
3381 || uNewEsp < cbStackFrame)
3382 {
3383 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3384 u8Vector, NewSS, uNewEsp, cbStackFrame));
3385 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3386 }
3387 }
3388 else
3389 {
3390 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3391 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3392 {
3393 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3394 u8Vector, NewSS, uNewEsp, cbStackFrame));
3395 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3396 }
3397 }
3398
3399 /*
3400 * Start making changes.
3401 */
3402
3403 /* Set the new CPL so that stack accesses use it. */
3404 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3405 IEM_SET_CPL(pVCpu, uNewCpl);
3406
3407 /* Create the stack frame. */
3408 uint8_t bUnmapInfoStackFrame;
3409 RTPTRUNION uStackFrame;
3410 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3411 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3412 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3413 if (rcStrict != VINF_SUCCESS)
3414 return rcStrict;
3415 if (f32BitGate)
3416 {
3417 if (fFlags & IEM_XCPT_FLAGS_ERR)
3418 *uStackFrame.pu32++ = uErr;
3419 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3420 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3421 uStackFrame.pu32[2] = fEfl;
3422 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3423 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3424 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3425 if (fEfl & X86_EFL_VM)
3426 {
3427 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3428 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3429 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3430 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3431 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3432 }
3433 }
3434 else
3435 {
3436 if (fFlags & IEM_XCPT_FLAGS_ERR)
3437 *uStackFrame.pu16++ = uErr;
3438 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3439 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3440 uStackFrame.pu16[2] = fEfl;
3441 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3442 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3443 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3444 if (fEfl & X86_EFL_VM)
3445 {
3446 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3447 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3448 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3449 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3450 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3451 }
3452 }
3453 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3454 if (rcStrict != VINF_SUCCESS)
3455 return rcStrict;
3456
3457 /* Mark the selectors 'accessed' (hope this is the correct time). */
3458        /** @todo testcase: exactly _when_ are the accessed bits set - before or
3459 * after pushing the stack frame? (Write protect the gdt + stack to
3460 * find out.) */
3461 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3462 {
3463 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3464 if (rcStrict != VINF_SUCCESS)
3465 return rcStrict;
3466 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3467 }
3468
3469 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3470 {
3471 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3472 if (rcStrict != VINF_SUCCESS)
3473 return rcStrict;
3474 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3475 }
3476
3477 /*
3478         * Start committing the register changes (joins with the DPL=CPL branch).
3479 */
3480 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3481 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3482 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3483 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3484 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3485 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3486 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3487 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3488 * SP is loaded).
3489 * Need to check the other combinations too:
3490 * - 16-bit TSS, 32-bit handler
3491 * - 32-bit TSS, 16-bit handler */
3492 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3493 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3494 else
3495 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3496
3497 if (fEfl & X86_EFL_VM)
3498 {
3499 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3500 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3501 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3502 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3503 }
3504 }
3505 /*
3506 * Same privilege, no stack change and smaller stack frame.
3507 */
3508 else
3509 {
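            /* Same-privilege frame: EFLAGS, CS and EIP take 6 bytes as 16-bit words (8 with an
               error code), doubled by the f32BitGate shift for 32-bit gates. */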
3510 uint64_t uNewRsp;
3511 uint8_t bUnmapInfoStackFrame;
3512 RTPTRUNION uStackFrame;
3513 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3514 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
3515 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
3516 if (rcStrict != VINF_SUCCESS)
3517 return rcStrict;
3518
3519 if (f32BitGate)
3520 {
3521 if (fFlags & IEM_XCPT_FLAGS_ERR)
3522 *uStackFrame.pu32++ = uErr;
3523 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3524 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3525 uStackFrame.pu32[2] = fEfl;
3526 }
3527 else
3528 {
3529 if (fFlags & IEM_XCPT_FLAGS_ERR)
3530 *uStackFrame.pu16++ = uErr;
3531 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3532 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3533 uStackFrame.pu16[2] = fEfl;
3534 }
3535 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
3536 if (rcStrict != VINF_SUCCESS)
3537 return rcStrict;
3538
3539 /* Mark the CS selector as 'accessed'. */
3540 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3541 {
3542 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3543 if (rcStrict != VINF_SUCCESS)
3544 return rcStrict;
3545 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3546 }
3547
3548 /*
3549 * Start committing the register changes (joins with the other branch).
3550 */
3551 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3552 }
3553
3554 /* ... register committing continues. */
3555 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3556 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3557 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3558 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3559 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3560 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3561
3562 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3563 fEfl &= ~fEflToClear;
3564 IEMMISC_SET_EFL(pVCpu, fEfl);
3565
3566 if (fFlags & IEM_XCPT_FLAGS_CR2)
3567 pVCpu->cpum.GstCtx.cr2 = uCr2;
3568
3569 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3570 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3571
3572 /* Make sure the execution flags are correct. */
3573 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3574 if (fExecNew != pVCpu->iem.s.fExec)
3575 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3576 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3577 pVCpu->iem.s.fExec = fExecNew;
3578 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3579
3580 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3581}
3582
3583
3584/**
3585 * Implements exceptions and interrupts for long mode.
3586 *
3587 * @returns VBox strict status code.
3588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3589 * @param cbInstr The number of bytes to offset rIP by in the return
3590 * address.
3591 * @param u8Vector The interrupt / exception vector number.
3592 * @param fFlags The flags.
3593 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3594 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3595 */
3596static VBOXSTRICTRC
3597iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3598 uint8_t cbInstr,
3599 uint8_t u8Vector,
3600 uint32_t fFlags,
3601 uint16_t uErr,
3602 uint64_t uCr2) RT_NOEXCEPT
3603{
3604 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3605
3606 /*
3607 * Read the IDT entry.
3608 */
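        /* Long-mode IDT entries are 16 bytes, hence the shift by 4 and the two 8-byte fetches below. */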
3609 uint16_t offIdt = (uint16_t)u8Vector << 4;
3610 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3611 {
3612 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3613 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3614 }
3615 X86DESC64 Idte;
3616#ifdef _MSC_VER /* Shut up silly compiler warning. */
3617 Idte.au64[0] = 0;
3618 Idte.au64[1] = 0;
3619#endif
3620 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3621 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3622 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3623 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3624 {
3625 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3626 return rcStrict;
3627 }
3628 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3629 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3630 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3631
3632 /*
3633 * Check the descriptor type, DPL and such.
3634 * ASSUMES this is done in the same order as described for call-gate calls.
3635 */
3636 if (Idte.Gate.u1DescType)
3637 {
3638 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3639 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3640 }
3641 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3642 switch (Idte.Gate.u4Type)
3643 {
3644 case AMD64_SEL_TYPE_SYS_INT_GATE:
3645 fEflToClear |= X86_EFL_IF;
3646 break;
3647 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3648 break;
3649
3650 default:
3651 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3652 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3653 }
3654
3655 /* Check DPL against CPL if applicable. */
3656 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3657 {
3658 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3659 {
3660 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3661 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3662 }
3663 }
3664
3665 /* Is it there? */
3666 if (!Idte.Gate.u1Present)
3667 {
3668 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3669 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3670 }
3671
3672 /* A null CS is bad. */
3673 RTSEL NewCS = Idte.Gate.u16Sel;
3674 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3675 {
3676 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3677 return iemRaiseGeneralProtectionFault0(pVCpu);
3678 }
3679
3680 /* Fetch the descriptor for the new CS. */
3681 IEMSELDESC DescCS;
3682 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3683 if (rcStrict != VINF_SUCCESS)
3684 {
3685 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3686 return rcStrict;
3687 }
3688
3689 /* Must be a 64-bit code segment. */
3690 if (!DescCS.Long.Gen.u1DescType)
3691 {
3692 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3693 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3694 }
3695 if ( !DescCS.Long.Gen.u1Long
3696 || DescCS.Long.Gen.u1DefBig
3697 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3698 {
3699 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3700 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3701 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3702 }
3703
3704 /* Don't allow lowering the privilege level. For non-conforming CS
3705 selectors, the CS.DPL sets the privilege level the trap/interrupt
3706 handler runs at. For conforming CS selectors, the CPL remains
3707 unchanged, but the CS.DPL must be <= CPL. */
3708 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3709 * when CPU in Ring-0. Result \#GP? */
3710 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3711 {
3712 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3713 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3714 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3715 }
3716
3717
3718 /* Make sure the selector is present. */
3719 if (!DescCS.Legacy.Gen.u1Present)
3720 {
3721 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3722 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3723 }
3724
3725 /* Check that the new RIP is canonical. */
3726 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3727 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3728 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3729 if (!IEM_IS_CANONICAL(uNewRip))
3730 {
3731 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3732 return iemRaiseGeneralProtectionFault0(pVCpu);
3733 }
3734
3735 /*
3736 * If the privilege level changes or if the IST isn't zero, we need to get
3737 * a new stack from the TSS.
3738 */
3739 uint64_t uNewRsp;
3740 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3741 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3742 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3743 || Idte.Gate.u3IST != 0)
3744 {
3745 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3746 if (rcStrict != VINF_SUCCESS)
3747 return rcStrict;
3748 }
3749 else
3750 uNewRsp = pVCpu->cpum.GstCtx.rsp;
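        /* In 64-bit mode the new RSP is aligned down to a 16-byte boundary before the frame is pushed. */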
3751 uNewRsp &= ~(uint64_t)0xf;
3752
3753 /*
3754 * Calc the flag image to push.
3755 */
3756 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3757 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3758 fEfl &= ~X86_EFL_RF;
3759 else
3760 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3761
3762 /*
3763 * Start making changes.
3764 */
3765 /* Set the new CPL so that stack accesses use it. */
3766 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3767 IEM_SET_CPL(pVCpu, uNewCpl);
3768/** @todo Setting CPL this early seems wrong as it would affect any errors we
3769 * raise accessing the stack and (?) GDT/LDT... */
3770
3771 /* Create the stack frame. */
3772 uint8_t bUnmapInfoStackFrame;
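        /* The 64-bit frame always holds SS, RSP, RFLAGS, CS and RIP (five quadwords), plus an optional error code. */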
3773 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3774 RTPTRUNION uStackFrame;
3775 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3776 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3777 if (rcStrict != VINF_SUCCESS)
3778 return rcStrict;
3779
3780 if (fFlags & IEM_XCPT_FLAGS_ERR)
3781 *uStackFrame.pu64++ = uErr;
3782 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3783 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3784 uStackFrame.pu64[2] = fEfl;
3785 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3786 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3787 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3788 if (rcStrict != VINF_SUCCESS)
3789 return rcStrict;
3790
3791 /* Mark the CS selectors 'accessed' (hope this is the correct time). */
3792    /** @todo testcase: exactly _when_ are the accessed bits set - before or
3793 * after pushing the stack frame? (Write protect the gdt + stack to
3794 * find out.) */
3795 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3796 {
3797 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3798 if (rcStrict != VINF_SUCCESS)
3799 return rcStrict;
3800 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3801 }
3802
3803 /*
3804     * Start committing the register changes.
3805 */
3806 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3807 * hidden registers when interrupting 32-bit or 16-bit code! */
3808 if (uNewCpl != uOldCpl)
3809 {
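            /* A long-mode stack switch loads SS with a null selector carrying the new RPL; the
               cached descriptor is marked unusable with only the DPL field kept meaningful. */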
3810 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3811 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3812 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3813 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3814 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3815 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3816 }
3817 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3818 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3819 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3820 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3821 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3822 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3823 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3824 pVCpu->cpum.GstCtx.rip = uNewRip;
3825
3826 fEfl &= ~fEflToClear;
3827 IEMMISC_SET_EFL(pVCpu, fEfl);
3828
3829 if (fFlags & IEM_XCPT_FLAGS_CR2)
3830 pVCpu->cpum.GstCtx.cr2 = uCr2;
3831
3832 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3833 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3834
3835 iemRecalcExecModeAndCplFlags(pVCpu);
3836
3837 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3838}
3839
3840
3841/**
3842 * Implements exceptions and interrupts.
3843 *
3844 * All exceptions and interrupts go thru this function!
3845 *
3846 * @returns VBox strict status code.
3847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3848 * @param cbInstr The number of bytes to offset rIP by in the return
3849 * address.
3850 * @param u8Vector The interrupt / exception vector number.
3851 * @param fFlags The flags.
3852 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3853 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3854 */
3855VBOXSTRICTRC
3856iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3857 uint8_t cbInstr,
3858 uint8_t u8Vector,
3859 uint32_t fFlags,
3860 uint16_t uErr,
3861 uint64_t uCr2) RT_NOEXCEPT
3862{
3863 /*
3864 * Get all the state that we might need here.
3865 */
3866 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3867 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3868
3869#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3870 /*
3871 * Flush prefetch buffer
3872 */
3873 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3874#endif
3875
3876 /*
3877 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3878 */
3879 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3880 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3881 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3882 | IEM_XCPT_FLAGS_BP_INSTR
3883 | IEM_XCPT_FLAGS_ICEBP_INSTR
3884 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3885 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3886 {
3887 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3888 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3889 u8Vector = X86_XCPT_GP;
3890 uErr = 0;
3891 }
3892
3893 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3894#ifdef DBGFTRACE_ENABLED
3895 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3896 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3897 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3898#endif
3899
3900 /*
3901 * Check if DBGF wants to intercept the exception.
3902 */
3903 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
3904 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
3905 { /* likely */ }
3906 else
3907 {
3908 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
3909 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
3910 if (rcStrict != VINF_SUCCESS)
3911 return rcStrict;
3912 }
3913
3914 /*
3915 * Evaluate whether NMI blocking should be in effect.
3916 * Normally, NMI blocking is in effect whenever we inject an NMI.
3917 */
3918 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3919 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3920
3921#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3922 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3923 {
3924 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3925 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3926 return rcStrict0;
3927
3928 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3929 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3930 {
3931 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3932 fBlockNmi = false;
3933 }
3934 }
3935#endif
3936
3937#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3938 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3939 {
3940 /*
3941 * If the event is being injected as part of VMRUN, it isn't subject to event
3942 * intercepts in the nested-guest. However, secondary exceptions that occur
3943 * during injection of any event -are- subject to exception intercepts.
3944 *
3945 * See AMD spec. 15.20 "Event Injection".
3946 */
3947 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3948 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3949 else
3950 {
3951 /*
3952 * Check and handle if the event being raised is intercepted.
3953 */
3954 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3955 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3956 return rcStrict0;
3957 }
3958 }
3959#endif
3960
3961 /*
3962 * Set NMI blocking if necessary.
3963 */
3964 if (fBlockNmi)
3965 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3966
3967 /*
3968 * Do recursion accounting.
3969 */
3970 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3971 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3972 if (pVCpu->iem.s.cXcptRecursions == 0)
3973 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3974 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3975 else
3976 {
3977 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3978 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3979 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3980
3981 if (pVCpu->iem.s.cXcptRecursions >= 4)
3982 {
3983#ifdef DEBUG_bird
3984 AssertFailed();
3985#endif
3986 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3987 }
3988
3989 /*
3990 * Evaluate the sequence of recurring events.
3991 */
3992 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3993 NULL /* pXcptRaiseInfo */);
3994 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3995 { /* likely */ }
3996 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3997 {
3998 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3999 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4000 u8Vector = X86_XCPT_DF;
4001 uErr = 0;
4002#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4003 /* VMX nested-guest #DF intercept needs to be checked here. */
4004 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4005 {
4006 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
4007 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4008 return rcStrict0;
4009 }
4010#endif
4011 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
4012 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
4013 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4014 }
4015 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4016 {
4017 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4018 return iemInitiateCpuShutdown(pVCpu);
4019 }
4020 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4021 {
4022 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4023 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4024 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4025 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4026 return VERR_EM_GUEST_CPU_HANG;
4027 }
4028 else
4029 {
4030 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4031 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4032 return VERR_IEM_IPE_9;
4033 }
4034
4035 /*
4036         * The 'EXT' bit is set when an exception occurs during delivery of an external
4037         * event (such as an interrupt or earlier exception)[1]. A privileged software
4038         * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
4039         * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set[3].
4040 *
4041 * [1] - Intel spec. 6.13 "Error Code"
4042 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4043 * [3] - Intel Instruction reference for INT n.
4044 */
4045 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4046 && (fFlags & IEM_XCPT_FLAGS_ERR)
4047 && u8Vector != X86_XCPT_PF
4048 && u8Vector != X86_XCPT_DF)
4049 {
4050 uErr |= X86_TRAP_ERR_EXTERNAL;
4051 }
4052 }
4053
4054 pVCpu->iem.s.cXcptRecursions++;
4055 pVCpu->iem.s.uCurXcpt = u8Vector;
4056 pVCpu->iem.s.fCurXcpt = fFlags;
4057 pVCpu->iem.s.uCurXcptErr = uErr;
4058 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4059
4060 /*
4061 * Extensive logging.
4062 */
4063#if defined(LOG_ENABLED) && defined(IN_RING3)
4064 if (LogIs3Enabled())
4065 {
4066 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4067 char szRegs[4096];
4068 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4069 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4070 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4071 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4072 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4073 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4074 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4075 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4076 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4077 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4078 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4079 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4080 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4081 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4082 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4083 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4084 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4085 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4086 " efer=%016VR{efer}\n"
4087 " pat=%016VR{pat}\n"
4088 " sf_mask=%016VR{sf_mask}\n"
4089 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4090 " lstar=%016VR{lstar}\n"
4091 " star=%016VR{star} cstar=%016VR{cstar}\n"
4092 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4093 );
4094
4095 char szInstr[256];
4096 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4097 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4098 szInstr, sizeof(szInstr), NULL);
4099 Log3(("%s%s\n", szRegs, szInstr));
4100 }
4101#endif /* LOG_ENABLED */
4102
4103 /*
4104 * Stats.
4105 */
4106 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4107 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4108 else if (u8Vector <= X86_XCPT_LAST)
4109 {
4110 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4111 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4112 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
4113 }
4114
4115 /*
4116     * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4117 * to ensure that a stale TLB or paging cache entry will only cause one
4118 * spurious #PF.
4119 */
4120 if ( u8Vector == X86_XCPT_PF
4121 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4122 IEMTlbInvalidatePage(pVCpu, uCr2);
4123
4124 /*
4125 * Call the mode specific worker function.
4126 */
4127 VBOXSTRICTRC rcStrict;
4128 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4129 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4130 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4131 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4132 else
4133 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4134
4135 /* Flush the prefetch buffer. */
4136 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4137
4138 /*
4139 * Unwind.
4140 */
4141 pVCpu->iem.s.cXcptRecursions--;
4142 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4143 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4144 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4145 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4146 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4147 return rcStrict;
4148}
4149
4150#ifdef IEM_WITH_SETJMP
4151/**
4152 * See iemRaiseXcptOrInt. Will not return.
4153 */
4154DECL_NO_RETURN(void)
4155iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4156 uint8_t cbInstr,
4157 uint8_t u8Vector,
4158 uint32_t fFlags,
4159 uint16_t uErr,
4160 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4161{
4162 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4163 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4164}
4165#endif
4166
4167
4168/** \#DE - 00. */
4169VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4170{
4171 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4172}
4173
4174
4175/** \#DB - 01.
4176 * @note This automatically clears DR7.GD. */
4177VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4178{
4179 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4180 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4181 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4182}
4183
4184
4185/** \#BR - 05. */
4186VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4187{
4188 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4189}
4190
4191
4192/** \#UD - 06. */
4193VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4194{
4195 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4196}
4197
4198
4199/** \#NM - 07. */
4200VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4201{
4202 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4203}
4204
4205
4206/** \#TS(err) - 0a. */
4207VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4208{
4209 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4210}
4211
4212
4213/** \#TS(tr) - 0a. */
4214VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4215{
4216 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4217 pVCpu->cpum.GstCtx.tr.Sel, 0);
4218}
4219
4220
4221/** \#TS(0) - 0a. */
4222VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4223{
4224 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4225 0, 0);
4226}
4227
4228
4229/** \#TS(err) - 0a. */
4230VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4231{
4232 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4233 uSel & X86_SEL_MASK_OFF_RPL, 0);
4234}
4235
4236
4237/** \#NP(err) - 0b. */
4238VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4239{
4240 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4241}
4242
4243
4244/** \#NP(sel) - 0b. */
4245VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4246{
4247 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4248 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4249 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4250 uSel & ~X86_SEL_RPL, 0);
4251}
4252
4253
4254/** \#SS(seg) - 0c. */
4255VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4256{
4257 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4258 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4259 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4260 uSel & ~X86_SEL_RPL, 0);
4261}
4262
4263
4264/** \#SS(err) - 0c. */
4265VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4266{
4267 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4268 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4269 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4270}
4271
4272
4273/** \#GP(n) - 0d. */
4274VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4275{
4276 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4277 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4278}
4279
4280
4281/** \#GP(0) - 0d. */
4282VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4283{
4284 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4285 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4286}
4287
4288#ifdef IEM_WITH_SETJMP
4289/** \#GP(0) - 0d. */
4290DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4291{
4292 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4293 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4294}
4295#endif
4296
4297
4298/** \#GP(sel) - 0d. */
4299VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4300{
4301 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4302 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4303 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4304 Sel & ~X86_SEL_RPL, 0);
4305}
4306
4307
4308/** \#GP(0) - 0d. */
4309VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4310{
4311 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4312 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4313}
4314
4315
4316/** \#GP(sel) - 0d. */
4317VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4318{
4319 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4320 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4321 NOREF(iSegReg); NOREF(fAccess);
4322 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4323 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4324}
4325
4326#ifdef IEM_WITH_SETJMP
4327/** \#GP(sel) - 0d, longjmp. */
4328DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4329{
4330 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4331 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4332 NOREF(iSegReg); NOREF(fAccess);
4333 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4334 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4335}
4336#endif
4337
4338/** \#GP(sel) - 0d. */
4339VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4340{
4341 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4342 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4343 NOREF(Sel);
4344 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4345}
4346
4347#ifdef IEM_WITH_SETJMP
4348/** \#GP(sel) - 0d, longjmp. */
4349DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4350{
4351 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4352 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4353 NOREF(Sel);
4354 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4355}
4356#endif
4357
4358
4359/** \#GP(sel) - 0d. */
4360VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4361{
4362 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4363 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4364 NOREF(iSegReg); NOREF(fAccess);
4365 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4366}
4367
4368#ifdef IEM_WITH_SETJMP
4369/** \#GP(sel) - 0d, longjmp. */
4370DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4371{
4372 NOREF(iSegReg); NOREF(fAccess);
4373 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4374}
4375#endif
4376
4377
4378/** \#PF(n) - 0e. */
4379VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4380{
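        /* Assemble the #PF error code: P when the page was present but access was denied,
           US for CPL 3, ID for instruction fetches with PAE+NX enabled, and RW for writes. */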
4381 uint16_t uErr;
4382 switch (rc)
4383 {
4384 case VERR_PAGE_NOT_PRESENT:
4385 case VERR_PAGE_TABLE_NOT_PRESENT:
4386 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4387 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4388 uErr = 0;
4389 break;
4390
4391 default:
4392 AssertMsgFailed(("%Rrc\n", rc));
4393 RT_FALL_THRU();
4394 case VERR_ACCESS_DENIED:
4395 uErr = X86_TRAP_PF_P;
4396 break;
4397
4398 /** @todo reserved */
4399 }
4400
4401 if (IEM_GET_CPL(pVCpu) == 3)
4402 uErr |= X86_TRAP_PF_US;
4403
4404 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4405 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4406 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4407 uErr |= X86_TRAP_PF_ID;
4408
4409#if 0 /* This is so much non-sense, really. Why was it done like that? */
4410 /* Note! RW access callers reporting a WRITE protection fault, will clear
4411 the READ flag before calling. So, read-modify-write accesses (RW)
4412 can safely be reported as READ faults. */
4413 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4414 uErr |= X86_TRAP_PF_RW;
4415#else
4416 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4417 {
4418 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4419 /// (regardless of outcome of the comparison in the latter case).
4420 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4421 uErr |= X86_TRAP_PF_RW;
4422 }
4423#endif
4424
4425    /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4426       of the memory operand rather than at the start of it. (Not sure what
4427       happens if it crosses a page boundary.) The current heuristic for this
4428       is to report the #PF for the last byte if the access is more than
4429       64 bytes. This is probably not correct, but we can work that out later;
4430       the main objective now is to get FXSAVE to work like on real hardware
4431       and make bs3-cpu-basic2 work. */
4432 if (cbAccess <= 64)
4433    { /* likely */ }
4434 else
4435 GCPtrWhere += cbAccess - 1;
4436
4437 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4438 uErr, GCPtrWhere);
4439}
4440
4441#ifdef IEM_WITH_SETJMP
4442/** \#PF(n) - 0e, longjmp. */
4443DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4444 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4445{
4446 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4447}
4448#endif
4449
4450
4451/** \#MF(0) - 10. */
4452VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4453{
4454 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4455 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4456
4457 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4458 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4459 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4460}
4461
4462
4463/** \#AC(0) - 11. */
4464VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4465{
4466 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4467}
4468
4469#ifdef IEM_WITH_SETJMP
4470/** \#AC(0) - 11, longjmp. */
4471DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4472{
4473 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4474}
4475#endif
4476
4477
4478/** \#XF(0)/\#XM(0) - 19. */
4479VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4480{
4481 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4482}
4483
4484
4485/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4486IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4487{
4488 NOREF(cbInstr);
4489 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4490}
4491
4492
4493/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4494IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4495{
4496 NOREF(cbInstr);
4497 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4498}
4499
4500
4501/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4502IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4503{
4504 NOREF(cbInstr);
4505 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4506}
4507
4508
4509/** @} */
4510
4511/** @name Common opcode decoders.
4512 * @{
4513 */
4514//#include <iprt/mem.h>
4515
4516/**
4517 * Used to add extra details about a stub case.
4518 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4519 */
4520void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4521{
4522#if defined(LOG_ENABLED) && defined(IN_RING3)
4523 PVM pVM = pVCpu->CTX_SUFF(pVM);
4524 char szRegs[4096];
4525 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4526 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4527 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4528 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4529 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4530 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4531 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4532 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4533 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4534 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4535 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4536 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4537 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4538 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4539 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4540 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4541 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4542 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4543 " efer=%016VR{efer}\n"
4544 " pat=%016VR{pat}\n"
4545 " sf_mask=%016VR{sf_mask}\n"
4546 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4547 " lstar=%016VR{lstar}\n"
4548 " star=%016VR{star} cstar=%016VR{cstar}\n"
4549 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4550 );
4551
4552 char szInstr[256];
4553 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4554 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4555 szInstr, sizeof(szInstr), NULL);
4556
4557 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4558#else
4559    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4560#endif
4561}
4562
4563/** @} */
4564
4565
4566
4567/** @name Register Access.
4568 * @{
4569 */
4570
4571/**
4572 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4573 *
4574 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4575 * segment limit.
4576 *
4577 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4578 * @param cbInstr Instruction size.
4579 * @param offNextInstr The offset of the next instruction.
4580 * @param enmEffOpSize Effective operand size.
4581 */
4582VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4583 IEMMODE enmEffOpSize) RT_NOEXCEPT
4584{
4585 switch (enmEffOpSize)
4586 {
4587 case IEMMODE_16BIT:
4588 {
4589 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4590 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4591 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4592 pVCpu->cpum.GstCtx.rip = uNewIp;
4593 else
4594 return iemRaiseGeneralProtectionFault0(pVCpu);
4595 break;
4596 }
4597
4598 case IEMMODE_32BIT:
4599 {
4600 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4601 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4602
4603 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4604 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4605 pVCpu->cpum.GstCtx.rip = uNewEip;
4606 else
4607 return iemRaiseGeneralProtectionFault0(pVCpu);
4608 break;
4609 }
4610
4611 case IEMMODE_64BIT:
4612 {
4613 Assert(IEM_IS_64BIT_CODE(pVCpu));
4614
4615 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4616 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4617 pVCpu->cpum.GstCtx.rip = uNewRip;
4618 else
4619 return iemRaiseGeneralProtectionFault0(pVCpu);
4620 break;
4621 }
4622
4623 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4624 }
4625
4626#ifndef IEM_WITH_CODE_TLB
4627 /* Flush the prefetch buffer. */
4628 pVCpu->iem.s.cbOpcode = cbInstr;
4629#endif
4630
4631 /*
4632 * Clear RF and finish the instruction (maybe raise #DB).
4633 */
4634 return iemRegFinishClearingRF(pVCpu);
4635}
4636
4637
4638/**
4639 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4640 *
4641 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4642 * segment limit.
4643 *
4644 * @returns Strict VBox status code.
4645 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4646 * @param cbInstr Instruction size.
4647 * @param offNextInstr The offset of the next instruction.
4648 */
4649VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4650{
4651 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4652
4653 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4654 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4655 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4656 pVCpu->cpum.GstCtx.rip = uNewIp;
4657 else
4658 return iemRaiseGeneralProtectionFault0(pVCpu);
4659
4660#ifndef IEM_WITH_CODE_TLB
4661 /* Flush the prefetch buffer. */
4662 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4663#endif
4664
4665 /*
4666 * Clear RF and finish the instruction (maybe raise #DB).
4667 */
4668 return iemRegFinishClearingRF(pVCpu);
4669}
4670
4671
4672/**
4673 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4674 *
4675 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4676 * segment limit.
4677 *
4678 * @returns Strict VBox status code.
4679 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4680 * @param cbInstr Instruction size.
4681 * @param offNextInstr The offset of the next instruction.
4682 * @param enmEffOpSize Effective operand size.
4683 */
4684VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4685 IEMMODE enmEffOpSize) RT_NOEXCEPT
4686{
4687 if (enmEffOpSize == IEMMODE_32BIT)
4688 {
4689 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4690
4691 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4692 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4693 pVCpu->cpum.GstCtx.rip = uNewEip;
4694 else
4695 return iemRaiseGeneralProtectionFault0(pVCpu);
4696 }
4697 else
4698 {
4699 Assert(enmEffOpSize == IEMMODE_64BIT);
4700
4701 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4702 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4703 pVCpu->cpum.GstCtx.rip = uNewRip;
4704 else
4705 return iemRaiseGeneralProtectionFault0(pVCpu);
4706 }
4707
4708#ifndef IEM_WITH_CODE_TLB
4709 /* Flush the prefetch buffer. */
4710 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4711#endif
4712
4713 /*
4714 * Clear RF and finish the instruction (maybe raise #DB).
4715 */
4716 return iemRegFinishClearingRF(pVCpu);
4717}
4718
4719/** @} */
4720
4721
4722/** @name FPU access and helpers.
4723 *
4724 * @{
4725 */
4726
4727/**
4728 * Updates the x87.DS and FPUDP registers.
4729 *
4730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4731 * @param pFpuCtx The FPU context.
4732 * @param iEffSeg The effective segment register.
4733 * @param GCPtrEff The effective address relative to @a iEffSeg.
4734 */
4735DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4736{
4737 RTSEL sel;
4738 switch (iEffSeg)
4739 {
4740 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4741 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4742 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4743 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4744 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4745 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4746 default:
4747 AssertMsgFailed(("%d\n", iEffSeg));
4748 sel = pVCpu->cpum.GstCtx.ds.Sel;
4749 }
4750    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4751 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4752 {
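        /* Real and V86 mode: store the linear address (selector * 16 + offset)
           and leave the selector field zero. */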
4753 pFpuCtx->DS = 0;
4754 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4755 }
4756 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4757 {
4758 pFpuCtx->DS = sel;
4759 pFpuCtx->FPUDP = GCPtrEff;
4760 }
4761 else
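        /* Long mode: presumably mirroring the 64-bit FXSAVE image where these
           bytes hold a single flat 64-bit data pointer, store the full address
           across the FPUDP and DS fields. */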
4762 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4763}
4764
4765
4766/**
4767 * Rotates the stack registers in the push direction.
4768 *
4769 * @param pFpuCtx The FPU context.
4770 * @remarks This is a complete waste of time, but fxsave stores the registers in
4771 * stack order.
4772 */
4773DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4774{
4775 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4776 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4777 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4778 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4779 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4780 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4781 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4782 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4783 pFpuCtx->aRegs[0].r80 = r80Tmp;
4784}
4785
4786
4787/**
4788 * Rotates the stack registers in the pop direction.
4789 *
4790 * @param pFpuCtx The FPU context.
4791 * @remarks This is a complete waste of time, but fxsave stores the registers in
4792 * stack order.
4793 */
4794DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4795{
4796 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4797 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4798 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4799 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4800 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4801 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4802 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4803 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4804 pFpuCtx->aRegs[7].r80 = r80Tmp;
4805}
4806
4807
4808/**
4809 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4810 * exception prevents it.
4811 *
4812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4813 * @param pResult The FPU operation result to push.
4814 * @param pFpuCtx The FPU context.
4815 */
4816static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4817{
4818 /* Update FSW and bail if there are pending exceptions afterwards. */
4819 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4820 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4821 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4822 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4823 {
4824        if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4825 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4826 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4827 pFpuCtx->FSW = fFsw;
4828 return;
4829 }
4830
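    /* (TOP + 7) & 7 == (TOP - 1) mod 8, i.e. the slot a push would land in. */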
4831 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4832 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4833 {
4834 /* All is fine, push the actual value. */
4835 pFpuCtx->FTW |= RT_BIT(iNewTop);
4836 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4837 }
4838 else if (pFpuCtx->FCW & X86_FCW_IM)
4839 {
4840 /* Masked stack overflow, push QNaN. */
4841 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4842 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4843 }
4844 else
4845 {
4846 /* Raise stack overflow, don't push anything. */
4847 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4848 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4849 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4850 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4851 return;
4852 }
4853
4854 fFsw &= ~X86_FSW_TOP_MASK;
4855 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4856 pFpuCtx->FSW = fFsw;
4857
4858 iemFpuRotateStackPush(pFpuCtx);
4859 RT_NOREF(pVCpu);
4860}
4861
4862
4863/**
4864 * Stores a result in a FPU register and updates the FSW and FTW.
4865 *
4866 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4867 * @param pFpuCtx The FPU context.
4868 * @param pResult The result to store.
4869 * @param iStReg Which FPU register to store it in.
4870 */
4871static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4872{
4873 Assert(iStReg < 8);
4874 uint16_t fNewFsw = pFpuCtx->FSW;
4875 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4876 fNewFsw &= ~X86_FSW_C_MASK;
4877 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4878 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4879 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4880 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4881 pFpuCtx->FSW = fNewFsw;
4882 pFpuCtx->FTW |= RT_BIT(iReg);
4883 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4884 RT_NOREF(pVCpu);
4885}
4886
4887
4888/**
4889 * Only updates the FPU status word (FSW) with the result of the current
4890 * instruction.
4891 *
4892 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4893 * @param pFpuCtx The FPU context.
4894 * @param u16FSW The FSW output of the current instruction.
4895 */
4896static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4897{
4898 uint16_t fNewFsw = pFpuCtx->FSW;
4899 fNewFsw &= ~X86_FSW_C_MASK;
4900 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4901 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4902        Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4903 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4904 pFpuCtx->FSW = fNewFsw;
4905 RT_NOREF(pVCpu);
4906}
4907
4908
4909/**
4910 * Pops one item off the FPU stack if no pending exception prevents it.
4911 *
4912 * @param pFpuCtx The FPU context.
4913 */
4914static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4915{
4916 /* Check pending exceptions. */
4917 uint16_t uFSW = pFpuCtx->FSW;
4918 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4919 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4920 return;
4921
4922    /* Pop: TOP = (TOP + 1) & 7; adding 9 is the same as adding 1 modulo 8 within the TOP field. */
4923 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4924 uFSW &= ~X86_FSW_TOP_MASK;
4925 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4926 pFpuCtx->FSW = uFSW;
4927
4928 /* Mark the previous ST0 as empty. */
4929 iOldTop >>= X86_FSW_TOP_SHIFT;
4930 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4931
4932 /* Rotate the registers. */
4933 iemFpuRotateStackPop(pFpuCtx);
4934}
4935
4936
4937/**
4938 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4939 *
4940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4941 * @param pResult The FPU operation result to push.
4942 * @param uFpuOpcode The FPU opcode value.
4943 */
4944void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4945{
4946 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4947 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4948 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4949}
4950
4951
4952/**
4953 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4954 * and sets FPUDP and FPUDS.
4955 *
4956 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4957 * @param pResult The FPU operation result to push.
4958 * @param iEffSeg The effective segment register.
4959 * @param GCPtrEff The effective address relative to @a iEffSeg.
4960 * @param uFpuOpcode The FPU opcode value.
4961 */
4962void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
4963 uint16_t uFpuOpcode) RT_NOEXCEPT
4964{
4965 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4966 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4967 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4968 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4969}
4970
4971
4972/**
4973 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
4974 * unless a pending exception prevents it.
4975 *
4976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4977 * @param pResult The FPU operation result to store and push.
4978 * @param uFpuOpcode The FPU opcode value.
4979 */
4980void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4981{
4982 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4983 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4984
4985 /* Update FSW and bail if there are pending exceptions afterwards. */
4986 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4987 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4988 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4989 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4990 {
4991 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4992 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
4993 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4994 pFpuCtx->FSW = fFsw;
4995 return;
4996 }
4997
4998 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4999 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5000 {
5001 /* All is fine, push the actual value. */
5002 pFpuCtx->FTW |= RT_BIT(iNewTop);
5003 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5004 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5005 }
5006 else if (pFpuCtx->FCW & X86_FCW_IM)
5007 {
5008 /* Masked stack overflow, push QNaN. */
5009 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5010 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5011 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5012 }
5013 else
5014 {
5015 /* Raise stack overflow, don't push anything. */
5016 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5017 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5018 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5019 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5020 return;
5021 }
5022
5023 fFsw &= ~X86_FSW_TOP_MASK;
5024 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5025 pFpuCtx->FSW = fFsw;
5026
5027 iemFpuRotateStackPush(pFpuCtx);
5028}
5029
5030
5031/**
5032 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5033 * FOP.
5034 *
5035 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5036 * @param pResult The result to store.
5037 * @param iStReg Which FPU register to store it in.
5038 * @param uFpuOpcode The FPU opcode value.
5039 */
5040void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5041{
5042 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5043 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5044 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5045}
5046
5047
5048/**
5049 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5050 * FOP, and then pops the stack.
5051 *
5052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5053 * @param pResult The result to store.
5054 * @param iStReg Which FPU register to store it in.
5055 * @param uFpuOpcode The FPU opcode value.
5056 */
5057void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5058{
5059 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5060 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5061 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5062 iemFpuMaybePopOne(pFpuCtx);
5063}
5064
5065
5066/**
5067 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5068 * FPUDP, and FPUDS.
5069 *
5070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5071 * @param pResult The result to store.
5072 * @param iStReg Which FPU register to store it in.
5073 * @param iEffSeg The effective memory operand selector register.
5074 * @param GCPtrEff The effective memory operand offset.
5075 * @param uFpuOpcode The FPU opcode value.
5076 */
5077void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5078 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5079{
5080 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5081 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5082 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5083 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5084}
5085
5086
5087/**
5088 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5089 * FPUDP, and FPUDS, and then pops the stack.
5090 *
5091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5092 * @param pResult The result to store.
5093 * @param iStReg Which FPU register to store it in.
5094 * @param iEffSeg The effective memory operand selector register.
5095 * @param GCPtrEff The effective memory operand offset.
5096 * @param uFpuOpcode The FPU opcode value.
5097 */
5098void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5099 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5100{
5101 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5102 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5103 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5104 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5105 iemFpuMaybePopOne(pFpuCtx);
5106}
5107
5108
5109/**
5110 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5111 *
5112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5113 * @param uFpuOpcode The FPU opcode value.
5114 */
5115void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5116{
5117 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5118 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5119}
5120
5121
5122/**
5123 * Updates the FSW, FOP, FPUIP, and FPUCS.
5124 *
5125 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5126 * @param u16FSW The FSW from the current instruction.
5127 * @param uFpuOpcode The FPU opcode value.
5128 */
5129void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5130{
5131 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5132 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5133 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5134}
5135
5136
5137/**
5138 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5139 *
5140 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5141 * @param u16FSW The FSW from the current instruction.
5142 * @param uFpuOpcode The FPU opcode value.
5143 */
5144void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5145{
5146 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5147 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5148 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5149 iemFpuMaybePopOne(pFpuCtx);
5150}
5151
5152
5153/**
5154 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5155 *
5156 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5157 * @param u16FSW The FSW from the current instruction.
5158 * @param iEffSeg The effective memory operand selector register.
5159 * @param GCPtrEff The effective memory operand offset.
5160 * @param uFpuOpcode The FPU opcode value.
5161 */
5162void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5163{
5164 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5165 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5166 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5167 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5168}
5169
5170
5171/**
5172 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5173 *
5174 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5175 * @param u16FSW The FSW from the current instruction.
5176 * @param uFpuOpcode The FPU opcode value.
5177 */
5178void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5179{
5180 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5181 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5182 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5183 iemFpuMaybePopOne(pFpuCtx);
5184 iemFpuMaybePopOne(pFpuCtx);
5185}
5186
5187
5188/**
5189 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5190 *
5191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5192 * @param u16FSW The FSW from the current instruction.
5193 * @param iEffSeg The effective memory operand selector register.
5194 * @param GCPtrEff The effective memory operand offset.
5195 * @param uFpuOpcode The FPU opcode value.
5196 */
5197void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5198 uint16_t uFpuOpcode) RT_NOEXCEPT
5199{
5200 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5201 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5202 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5203 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5204 iemFpuMaybePopOne(pFpuCtx);
5205}
5206
5207
5208/**
5209 * Worker routine for raising an FPU stack underflow exception.
5210 *
5211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5212 * @param pFpuCtx The FPU context.
5213 * @param iStReg The stack register being accessed.
5214 */
5215static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5216{
5217 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5218 if (pFpuCtx->FCW & X86_FCW_IM)
5219 {
5220 /* Masked underflow. */
5221 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5222 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5223 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
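        /* If the instruction has a destination register, mark it as in use and
           load it with QNaN; callers pass UINT8_MAX when there is none (fcom). */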
5224 if (iStReg != UINT8_MAX)
5225 {
5226 pFpuCtx->FTW |= RT_BIT(iReg);
5227 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5228 }
5229 }
5230 else
5231 {
5232 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5233 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5234 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5235 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5236 }
5237 RT_NOREF(pVCpu);
5238}
5239
5240
5241/**
5242 * Raises a FPU stack underflow exception.
5243 *
5244 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5245 * @param iStReg The destination register that should be loaded
5246 * with QNaN if \#IS is not masked. Specify
5247 * UINT8_MAX if none (like for fcom).
5248 * @param uFpuOpcode The FPU opcode value.
5249 */
5250void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5251{
5252 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5253 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5254 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5255}
5256
5257
5258void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5259{
5260 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5261 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5262 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5263 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5264}
5265
5266
5267void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5268{
5269 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5270 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5271 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5272 iemFpuMaybePopOne(pFpuCtx);
5273}
5274
5275
5276void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5277 uint16_t uFpuOpcode) RT_NOEXCEPT
5278{
5279 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5280 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5281 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5282 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5283 iemFpuMaybePopOne(pFpuCtx);
5284}
5285
5286
5287void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5288{
5289 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5290 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5291 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5292 iemFpuMaybePopOne(pFpuCtx);
5293 iemFpuMaybePopOne(pFpuCtx);
5294}
5295
5296
5297void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5298{
5299 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5300 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5301
5302 if (pFpuCtx->FCW & X86_FCW_IM)
5303 {
5304        /* Masked underflow - push QNaN. */
5305 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5306 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5307 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5308 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5309 pFpuCtx->FTW |= RT_BIT(iNewTop);
5310 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5311 iemFpuRotateStackPush(pFpuCtx);
5312 }
5313 else
5314 {
5315 /* Exception pending - don't change TOP or the register stack. */
5316 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5317 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5318 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5319 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5320 }
5321}
5322
5323
5324void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5325{
5326 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5327 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5328
5329 if (pFpuCtx->FCW & X86_FCW_IM)
5330 {
5331        /* Masked underflow - push QNaN. */
5332 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5333 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5334 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5335 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5336 pFpuCtx->FTW |= RT_BIT(iNewTop);
5337 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5338 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5339 iemFpuRotateStackPush(pFpuCtx);
5340 }
5341 else
5342 {
5343 /* Exception pending - don't change TOP or the register stack. */
5344 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5345 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5346 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5347 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5348 }
5349}
5350
5351
5352/**
5353 * Worker routine for raising an FPU stack overflow exception on a push.
5354 *
5355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5356 * @param pFpuCtx The FPU context.
5357 */
5358static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5359{
5360 if (pFpuCtx->FCW & X86_FCW_IM)
5361 {
5362 /* Masked overflow. */
5363 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5364 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5365 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5366 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5367 pFpuCtx->FTW |= RT_BIT(iNewTop);
5368 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5369 iemFpuRotateStackPush(pFpuCtx);
5370 }
5371 else
5372 {
5373 /* Exception pending - don't change TOP or the register stack. */
5374 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5375 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5376 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5377 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5378 }
5379 RT_NOREF(pVCpu);
5380}
5381
5382
5383/**
5384 * Raises a FPU stack overflow exception on a push.
5385 *
5386 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5387 * @param uFpuOpcode The FPU opcode value.
5388 */
5389void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5390{
5391 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5392 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5393 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5394}
5395
5396
5397/**
5398 * Raises a FPU stack overflow exception on a push with a memory operand.
5399 *
5400 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5401 * @param iEffSeg The effective memory operand selector register.
5402 * @param GCPtrEff The effective memory operand offset.
5403 * @param uFpuOpcode The FPU opcode value.
5404 */
5405void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5406{
5407 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5408 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5409 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5410 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5411}
5412
5413/** @} */
5414
5415
5416/** @name SSE+AVX SIMD access and helpers.
5417 *
5418 * @{
5419 */
5420/**
5421 * Stores a result in a SIMD XMM register, updates the MXCSR.
5422 *
5423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5424 * @param pResult The result to store.
5425 * @param iXmmReg Which SIMD XMM register to store the result in.
5426 */
5427void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5428{
5429 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5430 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5431
5432 /* The result is only updated if there is no unmasked exception pending. */
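    /* The exception mask bits sit X86_MXCSR_XCPT_MASK_SHIFT bits above the
       corresponding flag bits, so shifting the masks down aligns the two. */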
5433 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5434 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5435 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5436}
5437
5438
5439/**
5440 * Updates the MXCSR.
5441 *
5442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5443 * @param fMxcsr The new MXCSR value.
5444 */
5445void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5446{
5447 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5448 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5449}
5450/** @} */
5451
5452
5453/** @name Memory access.
5454 *
5455 * @{
5456 */
5457
5458#undef LOG_GROUP
5459#define LOG_GROUP LOG_GROUP_IEM_MEM
5460
5461/**
5462 * Updates the IEMCPU::cbWritten counter if applicable.
5463 *
5464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5465 * @param fAccess The access being accounted for.
5466 * @param cbMem The access size.
5467 */
5468DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5469{
5470 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5471 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5472 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5473}
5474
5475
5476/**
5477 * Applies the segment limit, base and attributes.
5478 *
5479 * This may raise a \#GP or \#SS.
5480 *
5481 * @returns VBox strict status code.
5482 *
5483 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5484 * @param fAccess The kind of access which is being performed.
5485 * @param iSegReg The index of the segment register to apply.
5486 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5487 * TSS, ++).
5488 * @param cbMem The access size.
5489 * @param pGCPtrMem Pointer to the guest memory address to apply
5490 * segmentation to. Input and output parameter.
5491 */
5492VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5493{
5494 if (iSegReg == UINT8_MAX)
5495 return VINF_SUCCESS;
5496
5497 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5498 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5499 switch (IEM_GET_CPU_MODE(pVCpu))
5500 {
5501 case IEMMODE_16BIT:
5502 case IEMMODE_32BIT:
5503 {
5504 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5505 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5506
5507 if ( pSel->Attr.n.u1Present
5508 && !pSel->Attr.n.u1Unusable)
5509 {
5510 Assert(pSel->Attr.n.u1DescType);
5511 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5512 {
5513 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5514 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5515 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5516
5517 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5518 {
5519 /** @todo CPL check. */
5520 }
5521
5522 /*
5523 * There are two kinds of data selectors, normal and expand down.
5524 */
5525 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5526 {
5527 if ( GCPtrFirst32 > pSel->u32Limit
5528 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5529 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5530 }
5531 else
5532 {
5533 /*
5534 * The upper boundary is defined by the B bit, not the G bit!
5535 */
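                        /* Valid offsets for an expand-down segment are
                           (u32Limit, 0xffff] or (u32Limit, 0xffffffff]
                           depending on the B bit, hence the inverted first check. */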
5536 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5537 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5538 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5539 }
5540 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5541 }
5542 else
5543 {
5544 /*
5545                     * Code selectors can usually be read thru; writing is
5546 * only permitted in real and V8086 mode.
5547 */
5548 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5549 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5550 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5551 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5552 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5553
5554 if ( GCPtrFirst32 > pSel->u32Limit
5555 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5556 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5557
5558 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5559 {
5560 /** @todo CPL check. */
5561 }
5562
5563 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5564 }
5565 }
5566 else
5567 return iemRaiseGeneralProtectionFault0(pVCpu);
5568 return VINF_SUCCESS;
5569 }
5570
5571 case IEMMODE_64BIT:
5572 {
5573 RTGCPTR GCPtrMem = *pGCPtrMem;
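            /* Only FS and GS have a meaningful base in 64-bit mode; the other
               segment bases are treated as zero (flat). */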
5574 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5575 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5576
5577 Assert(cbMem >= 1);
5578 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5579 return VINF_SUCCESS;
5580 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5581 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5582 return iemRaiseGeneralProtectionFault0(pVCpu);
5583 }
5584
5585 default:
5586 AssertFailedReturn(VERR_IEM_IPE_7);
5587 }
5588}
5589
5590
5591/**
5592 * Translates a virtual address to a physical address and checks if we
5593 * can access the page as specified.
5594 *
5595 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5596 * @param GCPtrMem The virtual address.
5597 * @param cbAccess The access size, for raising \#PF correctly for
5598 * FXSAVE and such.
5599 * @param fAccess The intended access.
5600 * @param pGCPhysMem Where to return the physical address.
5601 */
5602VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5603 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5604{
5605 /** @todo Need a different PGM interface here. We're currently using
5606     * generic / REM interfaces. This won't cut it for R0. */
5607 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5608 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5609 * here. */
5610 PGMPTWALK Walk;
5611 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5612 if (RT_FAILURE(rc))
5613 {
5614 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5615 /** @todo Check unassigned memory in unpaged mode. */
5616 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5617#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5618 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5619 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5620#endif
5621 *pGCPhysMem = NIL_RTGCPHYS;
5622 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5623 }
5624
5625 /* If the page is writable and does not have the no-exec bit set, all
5626 access is allowed. Otherwise we'll have to check more carefully... */
5627 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5628 {
5629 /* Write to read only memory? */
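        /* Ring-3 accesses (not made on behalf of the system) always honour the
           R/W bit; supervisor writes only fault here when CR0.WP is set. */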
5630 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5631 && !(Walk.fEffective & X86_PTE_RW)
5632 && ( ( IEM_GET_CPL(pVCpu) == 3
5633 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5634 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5635 {
5636 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5637 *pGCPhysMem = NIL_RTGCPHYS;
5638#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5639 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5640 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5641#endif
5642 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5643 }
5644
5645 /* Kernel memory accessed by userland? */
5646 if ( !(Walk.fEffective & X86_PTE_US)
5647 && IEM_GET_CPL(pVCpu) == 3
5648 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5649 {
5650 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5651 *pGCPhysMem = NIL_RTGCPHYS;
5652#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5653 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5654 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5655#endif
5656 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5657 }
5658
5659 /* Executing non-executable memory? */
5660 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5661 && (Walk.fEffective & X86_PTE_PAE_NX)
5662 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5663 {
5664 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5665 *pGCPhysMem = NIL_RTGCPHYS;
5666#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5667 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5668 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5669#endif
5670 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5671 VERR_ACCESS_DENIED);
5672 }
5673 }
5674
5675 /*
5676 * Set the dirty / access flags.
5677     * ASSUMES this is set when the address is translated rather than on commit...
5678 */
5679 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5680 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5681 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5682 {
5683 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5684 AssertRC(rc2);
5685 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5686 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5687 }
5688
5689 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5690 *pGCPhysMem = GCPhys;
5691 return VINF_SUCCESS;
5692}
5693
5694
5695/**
5696 * Looks up a memory mapping entry.
5697 *
5698 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5699 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5700 * @param pvMem The memory address.
5701 * @param fAccess The access to.
5702 */
5703DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5704{
5705 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5706 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5707 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5708 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5709 return 0;
5710 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5711 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5712 return 1;
5713 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5714 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5715 return 2;
5716 return VERR_NOT_FOUND;
5717}
5718
5719
5720/**
5721 * Finds a free memmap entry when using iNextMapping doesn't work.
5722 *
5723 * @returns Memory mapping index, 1024 on failure.
5724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5725 */
5726static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5727{
5728 /*
5729 * The easy case.
5730 */
5731 if (pVCpu->iem.s.cActiveMappings == 0)
5732 {
5733 pVCpu->iem.s.iNextMapping = 1;
5734 return 0;
5735 }
5736
5737 /* There should be enough mappings for all instructions. */
5738 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5739
5740 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5741 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5742 return i;
5743
5744 AssertFailedReturn(1024);
5745}
5746
5747
5748/**
5749 * Commits a bounce buffer that needs writing back and unmaps it.
5750 *
5751 * @returns Strict VBox status code.
5752 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5753 * @param iMemMap The index of the buffer to commit.
5754 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5755 * Always false in ring-3, obviously.
5756 */
5757static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5758{
5759 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5760 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5761#ifdef IN_RING3
5762 Assert(!fPostponeFail);
5763 RT_NOREF_PV(fPostponeFail);
5764#endif
5765
5766 /*
5767 * Do the writing.
5768 */
5769 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5770 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5771 {
5772 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5773 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5774 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5775 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5776 {
5777 /*
5778 * Carefully and efficiently dealing with access handler return
5779             * codes makes this a little bloated.
5780 */
5781 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5782 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5783 pbBuf,
5784 cbFirst,
5785 PGMACCESSORIGIN_IEM);
5786 if (rcStrict == VINF_SUCCESS)
5787 {
5788 if (cbSecond)
5789 {
5790 rcStrict = PGMPhysWrite(pVM,
5791 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5792 pbBuf + cbFirst,
5793 cbSecond,
5794 PGMACCESSORIGIN_IEM);
5795 if (rcStrict == VINF_SUCCESS)
5796 { /* nothing */ }
5797 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5798 {
5799 LogEx(LOG_GROUP_IEM,
5800 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5801 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5802 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5803 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5804 }
5805#ifndef IN_RING3
5806 else if (fPostponeFail)
5807 {
5808 LogEx(LOG_GROUP_IEM,
5809 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5810 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5811 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5812 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5813 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5814 return iemSetPassUpStatus(pVCpu, rcStrict);
5815 }
5816#endif
5817 else
5818 {
5819 LogEx(LOG_GROUP_IEM,
5820 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5821 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5822 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5823 return rcStrict;
5824 }
5825 }
5826 }
5827 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5828 {
5829 if (!cbSecond)
5830 {
5831 LogEx(LOG_GROUP_IEM,
5832 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5833 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5834 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5835 }
5836 else
5837 {
5838 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5839 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5840 pbBuf + cbFirst,
5841 cbSecond,
5842 PGMACCESSORIGIN_IEM);
5843 if (rcStrict2 == VINF_SUCCESS)
5844 {
5845 LogEx(LOG_GROUP_IEM,
5846 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5847 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5848 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5849 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5850 }
5851 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5852 {
5853 LogEx(LOG_GROUP_IEM,
5854 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5855 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5856 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5857 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5858 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5859 }
5860#ifndef IN_RING3
5861 else if (fPostponeFail)
5862 {
5863 LogEx(LOG_GROUP_IEM,
5864 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5865 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5866 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5867 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5868 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5869 return iemSetPassUpStatus(pVCpu, rcStrict);
5870 }
5871#endif
5872 else
5873 {
5874 LogEx(LOG_GROUP_IEM,
5875 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5876 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5877 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5878 return rcStrict2;
5879 }
5880 }
5881 }
5882#ifndef IN_RING3
5883 else if (fPostponeFail)
5884 {
5885 LogEx(LOG_GROUP_IEM,
5886 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5887 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5888 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5889 if (!cbSecond)
5890 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5891 else
5892 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5893 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5894 return iemSetPassUpStatus(pVCpu, rcStrict);
5895 }
5896#endif
5897 else
5898 {
5899 LogEx(LOG_GROUP_IEM,
5900 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5901 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5902 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5903 return rcStrict;
5904 }
5905 }
5906 else
5907 {
5908 /*
5909 * No access handlers, much simpler.
5910 */
5911 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5912 if (RT_SUCCESS(rc))
5913 {
5914 if (cbSecond)
5915 {
5916 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5917 if (RT_SUCCESS(rc))
5918 { /* likely */ }
5919 else
5920 {
5921 LogEx(LOG_GROUP_IEM,
5922 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5923 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5924 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5925 return rc;
5926 }
5927 }
5928 }
5929 else
5930 {
5931 LogEx(LOG_GROUP_IEM,
5932 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5933 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5934 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5935 return rc;
5936 }
5937 }
5938 }
5939
5940#if defined(IEM_LOG_MEMORY_WRITES)
5941 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5942 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5943 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5944 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5945 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5946 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5947
5948 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5949 g_cbIemWrote = cbWrote;
5950 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5951#endif
5952
5953 /*
5954 * Free the mapping entry.
5955 */
5956 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5957 Assert(pVCpu->iem.s.cActiveMappings != 0);
5958 pVCpu->iem.s.cActiveMappings--;
5959 return VINF_SUCCESS;
5960}
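/*
 * Added commentary (not part of the original source): when fPostponeFail is
 * given outside ring-3, a failing PGMPhysWrite above is not reported as an
 * error here.  Instead IEM_ACCESS_PENDING_R3_WRITE_1ST/2ND is recorded on the
 * mapping and VMCPU_FF_IEM is raised, so the pending bounce-buffer bytes can
 * be written out once execution returns to ring-3 (see
 * iemMemCommitAndUnmapPostponeTroubleToR3 further down).
 */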
5961
5962
5963/**
5964 * iemMemMap worker that deals with a request crossing pages.
5965 */
5966static VBOXSTRICTRC
5967iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
5968 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5969{
5970 Assert(cbMem <= GUEST_PAGE_SIZE);
5971
5972 /*
5973 * Do the address translations.
5974 */
5975 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
5976 RTGCPHYS GCPhysFirst;
5977 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
5978 if (rcStrict != VINF_SUCCESS)
5979 return rcStrict;
5980 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
5981
5982 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
5983 RTGCPHYS GCPhysSecond;
5984 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5985 cbSecondPage, fAccess, &GCPhysSecond);
5986 if (rcStrict != VINF_SUCCESS)
5987 return rcStrict;
5988 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
5989 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
5990
5991 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5992
5993 /*
5994 * Read in the current memory content if it's a read, execute or partial
5995 * write access.
5996 */
5997 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5998
5999 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6000 {
6001 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6002 {
6003 /*
6004 * Must carefully deal with access handler status codes here,
6005 * makes the code a bit bloated.
6006 */
6007 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6008 if (rcStrict == VINF_SUCCESS)
6009 {
6010 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6011 if (rcStrict == VINF_SUCCESS)
6012 { /*likely */ }
6013 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6014 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6015 else
6016 {
6017 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6018 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6019 return rcStrict;
6020 }
6021 }
6022 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6023 {
6024 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6025 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6026 {
6027 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6028 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6029 }
6030 else
6031 {
6032 LogEx(LOG_GROUP_IEM,
6033 ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6034 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6035 return rcStrict2;
6036 }
6037 }
6038 else
6039 {
6040 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6041 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6042 return rcStrict;
6043 }
6044 }
6045 else
6046 {
6047 /*
6048 * No informational status codes here, much more straightforward.
6049 */
6050 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6051 if (RT_SUCCESS(rc))
6052 {
6053 Assert(rc == VINF_SUCCESS);
6054 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6055 if (RT_SUCCESS(rc))
6056 Assert(rc == VINF_SUCCESS);
6057 else
6058 {
6059 LogEx(LOG_GROUP_IEM,
6060 ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6061 return rc;
6062 }
6063 }
6064 else
6065 {
6066 LogEx(LOG_GROUP_IEM,
6067 ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6068 return rc;
6069 }
6070 }
6071 }
6072#ifdef VBOX_STRICT
6073 else
6074 memset(pbBuf, 0xcc, cbMem);
6075 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6076 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6077#endif
6078 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6079
6080 /*
6081 * Commit the bounce buffer entry.
6082 */
6083 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6084 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6085 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6086 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6087 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6088 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6089 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6090 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6091 pVCpu->iem.s.cActiveMappings++;
6092
6093 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6094 *ppvMem = pbBuf;
6095 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6096 return VINF_SUCCESS;
6097}
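/*
 * Worked example (added commentary): with 4 KiB guest pages (GUEST_PAGE_SIZE
 * = 0x1000), an 8 byte access at GCPtrFirst = 0x1000ffa yields
 * cbFirstPage = 0x1000 - 0xffa = 6 and cbSecondPage = 8 - 6 = 2, so the bounce
 * buffer receives 6 bytes from the end of the first page followed by 2 bytes
 * from the start of the second page.
 */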
6098
6099
6100/**
6101 * iemMemMap worker that deals with iemMemPageMap failures.
6102 */
6103static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6104 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6105{
6106 /*
6107 * Filter out conditions we can handle and the ones which shouldn't happen.
6108 */
6109 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6110 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6111 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6112 {
6113 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6114 return rcMap;
6115 }
6116 pVCpu->iem.s.cPotentialExits++;
6117
6118 /*
6119 * Read in the current memory content if it's a read, execute or partial
6120 * write access.
6121 */
6122 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6123 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6124 {
6125 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6126 memset(pbBuf, 0xff, cbMem);
6127 else
6128 {
6129 int rc;
6130 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6131 {
6132 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6133 if (rcStrict == VINF_SUCCESS)
6134 { /* nothing */ }
6135 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6136 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6137 else
6138 {
6139 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6140 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6141 return rcStrict;
6142 }
6143 }
6144 else
6145 {
6146 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6147 if (RT_SUCCESS(rc))
6148 { /* likely */ }
6149 else
6150 {
6151 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6152 GCPhysFirst, rc));
6153 return rc;
6154 }
6155 }
6156 }
6157 }
6158#ifdef VBOX_STRICT
6159 else
6160 memset(pbBuf, 0xcc, cbMem);
6161#endif
6162#ifdef VBOX_STRICT
6163 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6164 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6165#endif
6166
6167 /*
6168 * Commit the bounce buffer entry.
6169 */
6170 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6171 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6172 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6173 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6174 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6175 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6176 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6177 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6178 pVCpu->iem.s.cActiveMappings++;
6179
6180 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6181 *ppvMem = pbBuf;
6182 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6183 return VINF_SUCCESS;
6184}
6185
6186
6187
6188/**
6189 * Maps the specified guest memory for the given kind of access.
6190 *
6191 * This may be using bounce buffering of the memory if it's crossing a page
6192 * boundary or if there is an access handler installed for any of it. Because
6193 * of lock prefix guarantees, we're in for some extra clutter when this
6194 * happens.
6195 *
6196 * This may raise a \#GP, \#SS, \#PF or \#AC.
6197 *
6198 * @returns VBox strict status code.
6199 *
6200 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6201 * @param ppvMem Where to return the pointer to the mapped memory.
6202 * @param pbUnmapInfo Where to return unmap info to be passed to
6203 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6204 * done.
6205 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6206 * 8, 12, 16, 32 or 512. When used by string operations
6207 * it can be up to a page.
6208 * @param iSegReg The index of the segment register to use for this
6209 * access. The base and limits are checked. Use UINT8_MAX
6210 * to indicate that no segmentation is required (for IDT,
6211 * GDT and LDT accesses).
6212 * @param GCPtrMem The address of the guest memory.
6213 * @param fAccess How the memory is being accessed. The
6214 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
6215 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
6216 * when raising exceptions.
6217 * @param uAlignCtl Alignment control:
6218 * - Bits 15:0 is the alignment mask.
6219 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6220 * IEM_MEMMAP_F_ALIGN_SSE, and
6221 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6222 * Pass zero to skip alignment.
6223 */
6224VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6225 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6226{
6227 /*
6228 * Check the input and figure out which mapping entry to use.
6229 */
6230 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6231 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6232 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6233 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6234 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6235
6236 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6237 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6238 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6239 {
6240 iMemMap = iemMemMapFindFree(pVCpu);
6241 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6242 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6243 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6244 pVCpu->iem.s.aMemMappings[2].fAccess),
6245 VERR_IEM_IPE_9);
6246 }
6247
6248 /*
6249 * Map the memory, checking that we can actually access it. If something
6250 * slightly complicated happens, fall back on bounce buffering.
6251 */
6252 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6253 if (rcStrict == VINF_SUCCESS)
6254 { /* likely */ }
6255 else
6256 return rcStrict;
6257
6258 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6259 { /* likely */ }
6260 else
6261 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6262
6263 /*
6264 * Alignment check.
6265 */
6266 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6267 { /* likelyish */ }
6268 else
6269 {
6270 /* Misaligned access. */
6271 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6272 {
6273 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6274 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6275 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6276 {
6277 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6278
6279 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6280 return iemRaiseAlignmentCheckException(pVCpu);
6281 }
6282 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6283 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6284 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6285 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6286 * that's what FXSAVE does on a 10980xe. */
6287 && iemMemAreAlignmentChecksEnabled(pVCpu))
6288 return iemRaiseAlignmentCheckException(pVCpu);
6289 else
6290 return iemRaiseGeneralProtectionFault0(pVCpu);
6291 }
6292 }
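    /* Added note: callers compose uAlignCtl from a low 16-bit alignment mask
       plus the IEM_MEMMAP_F_ALIGN_XXX flags, e.g. the SSE-aligned 128-bit fetch
       further down passes (sizeof(RTUINT128U) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE. */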
6293
6294#ifdef IEM_WITH_DATA_TLB
6295 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6296
6297 /*
6298 * Get the TLB entry for this page.
6299 */
6300 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6301 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6302 if (pTlbe->uTag == uTag)
6303 {
6304# ifdef VBOX_WITH_STATISTICS
6305 pVCpu->iem.s.DataTlb.cTlbHits++;
6306# endif
6307 }
6308 else
6309 {
6310 pVCpu->iem.s.DataTlb.cTlbMisses++;
6311 PGMPTWALK Walk;
6312 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6313 if (RT_FAILURE(rc))
6314 {
6315 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6316# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6317 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6318 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6319# endif
6320 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6321 }
6322
6323 Assert(Walk.fSucceeded);
6324 pTlbe->uTag = uTag;
6325 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6326 pTlbe->GCPhys = Walk.GCPhys;
6327 pTlbe->pbMappingR3 = NULL;
6328 }
6329
6330 /*
6331 * Check TLB page table level access flags.
6332 */
6333 /* If the page is either supervisor only or non-writable, we need to do
6334 more careful access checks. */
6335 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6336 {
6337 /* Write to read only memory? */
6338 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6339 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6340 && ( ( IEM_GET_CPL(pVCpu) == 3
6341 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6342 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6343 {
6344 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6345# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6346 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6347 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6348# endif
6349 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6350 }
6351
6352 /* Kernel memory accessed by userland? */
6353 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6354 && IEM_GET_CPL(pVCpu) == 3
6355 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6356 {
6357 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6358# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6359 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6360 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6361# endif
6362 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6363 }
6364 }
6365
6366 /*
6367 * Set the dirty / access flags.
6368 * ASSUMES this is set when the address is translated rather than on commit...
6369 */
6370 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6371 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6372 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6373 {
6374 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6375 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6376 AssertRC(rc2);
6377 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6378 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6379 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6380 }
6381
6382 /*
6383 * Look up the physical page info if necessary.
6384 */
6385 uint8_t *pbMem = NULL;
6386 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6387# ifdef IN_RING3
6388 pbMem = pTlbe->pbMappingR3;
6389# else
6390 pbMem = NULL;
6391# endif
6392 else
6393 {
6394 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6395 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6396 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6397 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6398 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6399 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6400 { /* likely */ }
6401 else
6402 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6403 pTlbe->pbMappingR3 = NULL;
6404 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6405 | IEMTLBE_F_NO_MAPPINGR3
6406 | IEMTLBE_F_PG_NO_READ
6407 | IEMTLBE_F_PG_NO_WRITE
6408 | IEMTLBE_F_PG_UNASSIGNED
6409 | IEMTLBE_F_PG_CODE_PAGE);
6410 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6411 &pbMem, &pTlbe->fFlagsAndPhysRev);
6412 AssertRCReturn(rc, rc);
6413# ifdef IN_RING3
6414 pTlbe->pbMappingR3 = pbMem;
6415# endif
6416 }
6417
6418 /*
6419 * Check the physical page level access and mapping.
6420 */
6421 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6422 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6423 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6424 { /* probably likely */ }
6425 else
6426 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
6427 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6428 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6429 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6430 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6431 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6432
6433 if (pbMem)
6434 {
6435 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6436 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6437 fAccess |= IEM_ACCESS_NOT_LOCKED;
6438 }
6439 else
6440 {
6441 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6442 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6443 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6444 if (rcStrict != VINF_SUCCESS)
6445 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6446 }
6447
6448 void * const pvMem = pbMem;
6449
6450 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6451 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6452 if (fAccess & IEM_ACCESS_TYPE_READ)
6453 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6454
6455#else /* !IEM_WITH_DATA_TLB */
6456
6457 RTGCPHYS GCPhysFirst;
6458 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6459 if (rcStrict != VINF_SUCCESS)
6460 return rcStrict;
6461
6462 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6463 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6464 if (fAccess & IEM_ACCESS_TYPE_READ)
6465 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6466
6467 void *pvMem;
6468 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6469 if (rcStrict != VINF_SUCCESS)
6470 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6471
6472#endif /* !IEM_WITH_DATA_TLB */
6473
6474 /*
6475 * Fill in the mapping table entry.
6476 */
6477 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6478 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6479 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6480 pVCpu->iem.s.cActiveMappings += 1;
6481
6482 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6483 *ppvMem = pvMem;
6484 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6485 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
6486 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
6487
6488 return VINF_SUCCESS;
6489}
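/*
 * Usage sketch (added commentary, not part of the original source): a typical
 * caller maps, accesses and commits in three steps, much like the data fetch
 * helpers further down in this file, e.g.:
 *
 *      uint8_t         bUnmapInfo;
 *      uint32_t const *pu32Src;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src),
 *                                        X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint32_t const uValue = *pu32Src;   // use the value as needed
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
 *      }
 *
 * X86_SREG_DS and GCPtrMem are illustrative here; see iemMemFetchDataU32_ZX_U64
 * below for the real pattern.
 */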
6490
6491
6492/**
6493 * Commits the guest memory if bounce buffered and unmaps it.
6494 *
6495 * @returns Strict VBox status code.
6496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6497 * @param bUnmapInfo Unmap info set by iemMemMap.
6498 */
6499VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6500{
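    /* bUnmapInfo layout (added note): bits 2:0 hold the mapping index, bit 3
       (0x08) marks the info as valid, and bits 7:4 carry the
       IEM_ACCESS_TYPE_XXX bits, mirroring what iemMemMap stored above. */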
6501 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6502 AssertMsgReturn( (bUnmapInfo & 0x08)
6503 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6504 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
6505 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6506 VERR_NOT_FOUND);
6507
6508 /* If it's bounce buffered, we may need to write back the buffer. */
6509 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6510 {
6511 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6512 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6513 }
6514 /* Otherwise unlock it. */
6515 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6516 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6517
6518 /* Free the entry. */
6519 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6520 Assert(pVCpu->iem.s.cActiveMappings != 0);
6521 pVCpu->iem.s.cActiveMappings--;
6522 return VINF_SUCCESS;
6523}
6524
6525
6526/**
6527 * Rolls back the guest memory (conceptually only) and unmaps it.
6528 *
6529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6530 * @param bUnmapInfo Unmap info set by iemMemMap.
6531 */
6532void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6533{
6534 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6535 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6536 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6537 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6538 == ((unsigned)bUnmapInfo >> 4),
6539 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6540
6541 /* Unlock it if necessary. */
6542 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6543 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6544
6545 /* Free the entry. */
6546 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6547 Assert(pVCpu->iem.s.cActiveMappings != 0);
6548 pVCpu->iem.s.cActiveMappings--;
6549}
6550
6551#ifdef IEM_WITH_SETJMP
6552
6553/**
6554 * Maps the specified guest memory for the given kind of access, longjmp on
6555 * error.
6556 *
6557 * This may be using bounce buffering of the memory if it's crossing a page
6558 * boundary or if there is an access handler installed for any of it. Because
6559 * of lock prefix guarantees, we're in for some extra clutter when this
6560 * happens.
6561 *
6562 * This may raise a \#GP, \#SS, \#PF or \#AC.
6563 *
6564 * @returns Pointer to the mapped memory.
6565 *
6566 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6567 * @param bUnmapInfo Where to return unmap info to be passed to
6568 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
6569 * iemMemCommitAndUnmapWoSafeJmp,
6570 * iemMemCommitAndUnmapRoSafeJmp,
6571 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
6572 * when done.
6573 * @param cbMem The number of bytes to map. This is usually 1,
6574 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6575 * string operations it can be up to a page.
6576 * @param iSegReg The index of the segment register to use for
6577 * this access. The base and limits are checked.
6578 * Use UINT8_MAX to indicate that no segmentation
6579 * is required (for IDT, GDT and LDT accesses).
6580 * @param GCPtrMem The address of the guest memory.
6581 * @param fAccess How the memory is being accessed. The
6582 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6583 * how to map the memory, while the
6584 * IEM_ACCESS_WHAT_XXX bit is used when raising
6585 * exceptions.
6586 * @param uAlignCtl Alignment control:
6587 * - Bits 15:0 is the alignment mask.
6588 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6589 * IEM_MEMMAP_F_ALIGN_SSE, and
6590 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6591 * Pass zero to skip alignment.
6592 */
6593void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6594 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6595{
6596 /*
6597 * Check the input, check segment access and adjust address
6598 * with segment base.
6599 */
6600 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6601 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6602 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6603
6604 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6605 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6606 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6607
6608 /*
6609 * Alignment check.
6610 */
6611 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6612 { /* likelyish */ }
6613 else
6614 {
6615 /* Misaligned access. */
6616 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6617 {
6618 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6619 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6620 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6621 {
6622 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6623
6624 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6625 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6626 }
6627 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6628 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6629 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6630 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6631 * that's what FXSAVE does on a 10980xe. */
6632 && iemMemAreAlignmentChecksEnabled(pVCpu))
6633 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6634 else
6635 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6636 }
6637 }
6638
6639 /*
6640 * Figure out which mapping entry to use.
6641 */
6642 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6643 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6644 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6645 {
6646 iMemMap = iemMemMapFindFree(pVCpu);
6647 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6648 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6649 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6650 pVCpu->iem.s.aMemMappings[2].fAccess),
6651 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6652 }
6653
6654 /*
6655 * Crossing a page boundary?
6656 */
6657 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6658 { /* No (likely). */ }
6659 else
6660 {
6661 void *pvMem;
6662 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6663 if (rcStrict == VINF_SUCCESS)
6664 return pvMem;
6665 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6666 }
6667
6668#ifdef IEM_WITH_DATA_TLB
6669 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6670
6671 /*
6672 * Get the TLB entry for this page.
6673 */
6674 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6675 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6676 if (pTlbe->uTag == uTag)
6677 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6678 else
6679 {
6680 pVCpu->iem.s.DataTlb.cTlbMisses++;
6681 PGMPTWALK Walk;
6682 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6683 if (RT_FAILURE(rc))
6684 {
6685 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6686# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6687 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6688 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6689# endif
6690 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6691 }
6692
6693 Assert(Walk.fSucceeded);
6694 pTlbe->uTag = uTag;
6695 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6696 pTlbe->GCPhys = Walk.GCPhys;
6697 pTlbe->pbMappingR3 = NULL;
6698 }
6699
6700 /*
6701 * Check the flags and physical revision.
6702 */
6703 /** @todo make the caller pass these in with fAccess. */
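    /* Added note: the fNo* masks computed below are OR'ed into the mask used
       for the comparison against uTlbPhysRev, so a single equality test checks
       the physical TLB revision and requires every access-right bit relevant
       to this access to be clear. */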
6704 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6705 ? IEMTLBE_F_PT_NO_USER : 0;
6706 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6707 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6708 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6709 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6710 ? IEMTLBE_F_PT_NO_WRITE : 0)
6711 : 0;
6712 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6713 uint8_t *pbMem = NULL;
6714 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6715 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6716# ifdef IN_RING3
6717 pbMem = pTlbe->pbMappingR3;
6718# else
6719 pbMem = NULL;
6720# endif
6721 else
6722 {
6723 /*
6724 * Okay, something isn't quite right or needs refreshing.
6725 */
6726 /* Write to read only memory? */
6727 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6728 {
6729 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6730# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6731 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6732 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6733# endif
6734 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6735 }
6736
6737 /* Kernel memory accessed by userland? */
6738 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6739 {
6740 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6741# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6742 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6743 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6744# endif
6745 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6746 }
6747
6748 /* Set the dirty / access flags.
6749 ASSUMES this is set when the address is translated rather than on commit... */
6750 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6751 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6752 {
6753 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6754 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6755 AssertRC(rc2);
6756 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6757 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6758 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6759 }
6760
6761 /*
6762 * Check if the physical page info needs updating.
6763 */
6764 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6765# ifdef IN_RING3
6766 pbMem = pTlbe->pbMappingR3;
6767# else
6768 pbMem = NULL;
6769# endif
6770 else
6771 {
6772 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6773 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6774 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6775 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6776 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6777 pTlbe->pbMappingR3 = NULL;
6778 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6779 | IEMTLBE_F_NO_MAPPINGR3
6780 | IEMTLBE_F_PG_NO_READ
6781 | IEMTLBE_F_PG_NO_WRITE
6782 | IEMTLBE_F_PG_UNASSIGNED
6783 | IEMTLBE_F_PG_CODE_PAGE);
6784 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6785 &pbMem, &pTlbe->fFlagsAndPhysRev);
6786 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6787# ifdef IN_RING3
6788 pTlbe->pbMappingR3 = pbMem;
6789# endif
6790 }
6791
6792 /*
6793 * Check the physical page level access and mapping.
6794 */
6795 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6796 { /* probably likely */ }
6797 else
6798 {
6799 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
6800 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6801 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6802 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6803 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6804 if (rcStrict == VINF_SUCCESS)
6805 return pbMem;
6806 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6807 }
6808 }
6809 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6810
6811 if (pbMem)
6812 {
6813 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6814 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6815 fAccess |= IEM_ACCESS_NOT_LOCKED;
6816 }
6817 else
6818 {
6819 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6820 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6821 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6822 if (rcStrict == VINF_SUCCESS)
6823 {
6824 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6825 return pbMem;
6826 }
6827 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6828 }
6829
6830 void * const pvMem = pbMem;
6831
6832 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6833 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6834 if (fAccess & IEM_ACCESS_TYPE_READ)
6835 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6836
6837#else /* !IEM_WITH_DATA_TLB */
6838
6839
6840 RTGCPHYS GCPhysFirst;
6841 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6842 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6843 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6844
6845 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6846 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6847 if (fAccess & IEM_ACCESS_TYPE_READ)
6848 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6849
6850 void *pvMem;
6851 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6852 if (rcStrict == VINF_SUCCESS)
6853 { /* likely */ }
6854 else
6855 {
6856 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6857 if (rcStrict == VINF_SUCCESS)
6858 return pvMem;
6859 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6860 }
6861
6862#endif /* !IEM_WITH_DATA_TLB */
6863
6864 /*
6865 * Fill in the mapping table entry.
6866 */
6867 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6868 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6869 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6870 pVCpu->iem.s.cActiveMappings++;
6871
6872 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6873
6874 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6875 return pvMem;
6876}
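/*
 * Added note: iemMemMapJmp is the longjmp twin of iemMemMap above.  Instead of
 * returning a strict status code, all failures are delivered via
 * IEM_DO_LONGJMP, so a successful call hands the mapping pointer straight back
 * to the caller.
 */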
6877
6878
6879/**
6880 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6881 *
6882 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6883 * @param bUnmapInfo Unmap info set by iemMemMap or iemMemMapJmp,
6884 * identifying the mapping to commit and unmap.
6885 */
6886void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6887{
6888 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6889 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6890 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6891 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6892 == ((unsigned)bUnmapInfo >> 4),
6893 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6894
6895 /* If it's bounce buffered, we may need to write back the buffer. */
6896 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6897 {
6898 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6899 {
6900 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6901 if (rcStrict == VINF_SUCCESS)
6902 return;
6903 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6904 }
6905 }
6906 /* Otherwise unlock it. */
6907 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6908 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6909
6910 /* Free the entry. */
6911 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6912 Assert(pVCpu->iem.s.cActiveMappings != 0);
6913 pVCpu->iem.s.cActiveMappings--;
6914}
6915
6916
6917/** Fallback for iemMemCommitAndUnmapRwJmp. */
6918void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6919{
6920 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
6921 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
6922}
6923
6924
6925/** Fallback for iemMemCommitAndUnmapWoJmp. */
6926void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6927{
6928 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
6929 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
6930}
6931
6932
6933/** Fallback for iemMemCommitAndUnmapRoJmp. */
6934void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6935{
6936 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
6937 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
6938}
6939
6940
6941/** Fallback for iemMemRollbackAndUnmapWo. */
6942void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6943{
6944 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
6945 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
6946}
6947
6948#endif /* IEM_WITH_SETJMP */
6949
6950#ifndef IN_RING3
6951/**
6952 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6953 * buffer part shows trouble, the write is postponed to ring-3 (sets VMCPU_FF_IEM).
6954 *
6955 * Allows the instruction to be completed and retired, while the IEM user will
6956 * return to ring-3 immediately afterwards and do the postponed writes there.
6957 *
6958 * @returns VBox status code (no strict statuses). Caller must check
6959 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6961 * @param bUnmapInfo Unmap info set by iemMemMap, identifying the mapping
6962 * to commit and unmap.
6963 */
6964VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6965{
6966 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6967 AssertMsgReturn( (bUnmapInfo & 0x08)
6968 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6969 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6970 == ((unsigned)bUnmapInfo >> 4),
6971 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6972 VERR_NOT_FOUND);
6973
6974 /* If it's bounce buffered, we may need to write back the buffer. */
6975 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6976 {
6977 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6978 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6979 }
6980 /* Otherwise unlock it. */
6981 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6982 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6983
6984 /* Free the entry. */
6985 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6986 Assert(pVCpu->iem.s.cActiveMappings != 0);
6987 pVCpu->iem.s.cActiveMappings--;
6988 return VINF_SUCCESS;
6989}
6990#endif
6991
6992
6993/**
6994 * Rolls back mappings, releasing page locks and such.
6995 *
6996 * The caller shall only call this after checking cActiveMappings.
6997 *
6998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6999 */
7000void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7001{
7002 Assert(pVCpu->iem.s.cActiveMappings > 0);
7003
7004 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7005 while (iMemMap-- > 0)
7006 {
7007 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7008 if (fAccess != IEM_ACCESS_INVALID)
7009 {
7010 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7011 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7012 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7013 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7014 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7015 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7016 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7017 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7018 pVCpu->iem.s.cActiveMappings--;
7019 }
7020 }
7021}
7022
7023
7024/*
7025 * Instantiate R/W templates.
7026 */
7027#define TMPL_MEM_WITH_STACK
7028
7029#define TMPL_MEM_TYPE uint8_t
7030#define TMPL_MEM_FN_SUFF U8
7031#define TMPL_MEM_FMT_TYPE "%#04x"
7032#define TMPL_MEM_FMT_DESC "byte"
7033#include "IEMAllMemRWTmpl.cpp.h"
7034
7035#define TMPL_MEM_TYPE uint16_t
7036#define TMPL_MEM_FN_SUFF U16
7037#define TMPL_MEM_FMT_TYPE "%#06x"
7038#define TMPL_MEM_FMT_DESC "word"
7039#include "IEMAllMemRWTmpl.cpp.h"
7040
7041#define TMPL_WITH_PUSH_SREG
7042#define TMPL_MEM_TYPE uint32_t
7043#define TMPL_MEM_FN_SUFF U32
7044#define TMPL_MEM_FMT_TYPE "%#010x"
7045#define TMPL_MEM_FMT_DESC "dword"
7046#include "IEMAllMemRWTmpl.cpp.h"
7047#undef TMPL_WITH_PUSH_SREG
7048
7049#define TMPL_MEM_TYPE uint64_t
7050#define TMPL_MEM_FN_SUFF U64
7051#define TMPL_MEM_FMT_TYPE "%#018RX64"
7052#define TMPL_MEM_FMT_DESC "qword"
7053#include "IEMAllMemRWTmpl.cpp.h"
7054
7055#undef TMPL_MEM_WITH_STACK
7056
7057#define TMPL_MEM_TYPE uint64_t
7058#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7059#define TMPL_MEM_FN_SUFF U64AlignedU128
7060#define TMPL_MEM_FMT_TYPE "%#018RX64"
7061#define TMPL_MEM_FMT_DESC "qword"
7062#include "IEMAllMemRWTmpl.cpp.h"
7063
7064/* See IEMAllMemRWTmplInline.cpp.h */
7065#define TMPL_MEM_BY_REF
7066
7067#define TMPL_MEM_TYPE RTFLOAT80U
7068#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7069#define TMPL_MEM_FN_SUFF R80
7070#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7071#define TMPL_MEM_FMT_DESC "tword"
7072#include "IEMAllMemRWTmpl.cpp.h"
7073
7074#define TMPL_MEM_TYPE RTPBCD80U
7075#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7076#define TMPL_MEM_FN_SUFF D80
7077#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7078#define TMPL_MEM_FMT_DESC "tword"
7079#include "IEMAllMemRWTmpl.cpp.h"
7080
7081#define TMPL_MEM_TYPE RTUINT128U
7082#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7083#define TMPL_MEM_FN_SUFF U128
7084#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7085#define TMPL_MEM_FMT_DESC "dqword"
7086#include "IEMAllMemRWTmpl.cpp.h"
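/*
 * Added note: each TMPL_MEM_TYPE/TMPL_MEM_FN_SUFF block above expands
 * IEMAllMemRWTmpl.cpp.h into type-specific fetch/store helpers named after the
 * suffix, e.g. iemMemFetchDataU16/U32/U64, which iemMemFetchDataXdtr and the
 * other routines below rely on.
 */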
7087
7088
7089/**
7090 * Fetches a data dword and zero extends it to a qword.
7091 *
7092 * @returns Strict VBox status code.
7093 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7094 * @param pu64Dst Where to return the qword.
7095 * @param iSegReg The index of the segment register to use for
7096 * this access. The base and limits are checked.
7097 * @param GCPtrMem The address of the guest memory.
7098 */
7099VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7100{
7101 /* The lazy approach for now... */
7102 uint8_t bUnmapInfo;
7103 uint32_t const *pu32Src;
7104 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7105 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7106 if (rc == VINF_SUCCESS)
7107 {
7108 *pu64Dst = *pu32Src;
7109 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7110 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7111 }
7112 return rc;
7113}
7114
7115
7116#ifdef SOME_UNUSED_FUNCTION
7117/**
7118 * Fetches a data dword and sign extends it to a qword.
7119 *
7120 * @returns Strict VBox status code.
7121 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7122 * @param pu64Dst Where to return the sign extended value.
7123 * @param iSegReg The index of the segment register to use for
7124 * this access. The base and limits are checked.
7125 * @param GCPtrMem The address of the guest memory.
7126 */
7127VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7128{
7129 /* The lazy approach for now... */
7130 uint8_t bUnmapInfo;
7131 int32_t const *pi32Src;
7132 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
7133 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7134 if (rc == VINF_SUCCESS)
7135 {
7136 *pu64Dst = *pi32Src;
7137 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7138 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7139 }
7140#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7141 else
7142 *pu64Dst = 0;
7143#endif
7144 return rc;
7145}
7146#endif
7147
7148
7149/**
7150 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7151 * related.
7152 *
7153 * Raises \#GP(0) if not aligned.
7154 *
7155 * @returns Strict VBox status code.
7156 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7157 * @param pu128Dst Where to return the dqword.
7158 * @param iSegReg The index of the segment register to use for
7159 * this access. The base and limits are checked.
7160 * @param GCPtrMem The address of the guest memory.
7161 */
7162VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7163{
7164 /* The lazy approach for now... */
7165 uint8_t bUnmapInfo;
7166 PCRTUINT128U pu128Src;
7167 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem,
7168 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7169 if (rc == VINF_SUCCESS)
7170 {
7171 pu128Dst->au64[0] = pu128Src->au64[0];
7172 pu128Dst->au64[1] = pu128Src->au64[1];
7173 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7174 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7175 }
7176 return rc;
7177}
7178
7179
7180#ifdef IEM_WITH_SETJMP
7181/**
7182 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7183 * related, longjmp on error.
7184 *
7185 * Raises \#GP(0) if not aligned.
7186 *
7187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7188 * @param pu128Dst Where to return the dqword.
7189 * @param iSegReg The index of the segment register to use for
7190 * this access. The base and limits are checked.
7191 * @param GCPtrMem The address of the guest memory.
7192 */
7193void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7194 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7195{
7196 /* The lazy approach for now... */
7197 uint8_t bUnmapInfo;
7198 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7199 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7200 pu128Dst->au64[0] = pu128Src->au64[0];
7201 pu128Dst->au64[1] = pu128Src->au64[1];
7202 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7203 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7204}
7205#endif
7206
7207
7208/**
7209 * Fetches a data oword (octo word), generally AVX related.
7210 *
7211 * @returns Strict VBox status code.
7212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7213 * @param pu256Dst Where to return the qqword.
7214 * @param iSegReg The index of the segment register to use for
7215 * this access. The base and limits are checked.
7216 * @param GCPtrMem The address of the guest memory.
7217 */
7218VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7219{
7220 /* The lazy approach for now... */
7221 uint8_t bUnmapInfo;
7222 PCRTUINT256U pu256Src;
7223 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7224 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7225 if (rc == VINF_SUCCESS)
7226 {
7227 pu256Dst->au64[0] = pu256Src->au64[0];
7228 pu256Dst->au64[1] = pu256Src->au64[1];
7229 pu256Dst->au64[2] = pu256Src->au64[2];
7230 pu256Dst->au64[3] = pu256Src->au64[3];
7231 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7232 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7233 }
7234 return rc;
7235}
7236
7237
7238#ifdef IEM_WITH_SETJMP
7239/**
7240 * Fetches a data oword (octo word), generally AVX related.
7241 *
7242 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7243 * @param pu256Dst Where to return the qqword.
7244 * @param iSegReg The index of the segment register to use for
7245 * this access. The base and limits are checked.
7246 * @param GCPtrMem The address of the guest memory.
7247 */
7248void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7249{
7250 /* The lazy approach for now... */
7251 uint8_t bUnmapInfo;
7252 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7253 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7254 pu256Dst->au64[0] = pu256Src->au64[0];
7255 pu256Dst->au64[1] = pu256Src->au64[1];
7256 pu256Dst->au64[2] = pu256Src->au64[2];
7257 pu256Dst->au64[3] = pu256Src->au64[3];
7258 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7259 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7260}
7261#endif
7262
7263
7264/**
7265 * Fetches a data oword (octo word) at an aligned address, generally AVX
7266 * related.
7267 *
7268 * Raises \#GP(0) if not aligned.
7269 *
7270 * @returns Strict VBox status code.
7271 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7272 * @param pu256Dst Where to return the qqword.
7273 * @param iSegReg The index of the segment register to use for
7274 * this access. The base and limits are checked.
7275 * @param GCPtrMem The address of the guest memory.
7276 */
7277VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7278{
7279 /* The lazy approach for now... */
7280 uint8_t bUnmapInfo;
7281 PCRTUINT256U pu256Src;
7282 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7283 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7284 if (rc == VINF_SUCCESS)
7285 {
7286 pu256Dst->au64[0] = pu256Src->au64[0];
7287 pu256Dst->au64[1] = pu256Src->au64[1];
7288 pu256Dst->au64[2] = pu256Src->au64[2];
7289 pu256Dst->au64[3] = pu256Src->au64[3];
7290 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7291 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7292 }
7293 return rc;
7294}
7295
7296
7297#ifdef IEM_WITH_SETJMP
7298/**
7299 * Fetches a data oword (octo word) at an aligned address, generally AVX
7300 * related, longjmp on error.
7301 *
7302 * Raises \#GP(0) if not aligned.
7303 *
7304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7305 * @param pu256Dst Where to return the qqword.
7306 * @param iSegReg The index of the segment register to use for
7307 * this access. The base and limits are checked.
7308 * @param GCPtrMem The address of the guest memory.
7309 */
7310void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7311 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7312{
7313 /* The lazy approach for now... */
7314 uint8_t bUnmapInfo;
7315 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7316 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7317 pu256Dst->au64[0] = pu256Src->au64[0];
7318 pu256Dst->au64[1] = pu256Src->au64[1];
7319 pu256Dst->au64[2] = pu256Src->au64[2];
7320 pu256Dst->au64[3] = pu256Src->au64[3];
7321 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7322 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7323}
7324#endif
7325
7326
7327
7328/**
7329 * Fetches a descriptor register (lgdt, lidt).
7330 *
7331 * @returns Strict VBox status code.
7332 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7333 * @param pcbLimit Where to return the limit.
7334 * @param pGCPtrBase Where to return the base.
7335 * @param iSegReg The index of the segment register to use for
7336 * this access. The base and limits are checked.
7337 * @param GCPtrMem The address of the guest memory.
7338 * @param enmOpSize The effective operand size.
7339 */
7340VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7341 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7342{
7343 /*
7344 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7345 * little special:
7346 * - The two reads are done separately.
7347 *      - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7348 * - We suspect the 386 to actually commit the limit before the base in
7349 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7350 *        don't try to emulate this eccentric behavior, because it's not well
7351 * enough understood and rather hard to trigger.
7352 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7353 */
7354 VBOXSTRICTRC rcStrict;
7355 if (IEM_IS_64BIT_CODE(pVCpu))
7356 {
7357 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7358 if (rcStrict == VINF_SUCCESS)
7359 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7360 }
7361 else
7362 {
7363 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */
7364 if (enmOpSize == IEMMODE_32BIT)
7365 {
7366 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7367 {
7368 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7369 if (rcStrict == VINF_SUCCESS)
7370 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7371 }
7372 else
7373 {
7374 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7375 if (rcStrict == VINF_SUCCESS)
7376 {
7377 *pcbLimit = (uint16_t)uTmp;
7378 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7379 }
7380 }
7381 if (rcStrict == VINF_SUCCESS)
7382 *pGCPtrBase = uTmp;
7383 }
7384 else
7385 {
7386 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7387 if (rcStrict == VINF_SUCCESS)
7388 {
7389 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7390 if (rcStrict == VINF_SUCCESS)
7391 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7392 }
7393 }
7394 }
7395 return rcStrict;
7396}
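
/**
 * @par Usage sketch (illustrative only, not a caller from this file)
 * Reading the 6-byte LGDT/LIDT operand in 16-bit code; GCPtrEff stands in for
 * an effective address already calculated by the caller:
 * @code
 *     uint16_t     cbLimit;
 *     RTGCPTR      GCPtrBase;
 *     VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase,
 *                                                 pVCpu->iem.s.iEffSeg, GCPtrEff, IEMMODE_16BIT);
 *     // On success cbLimit holds the word at GCPtrEff and GCPtrBase the dword at
 *     // GCPtrEff + 2 with the top byte masked off (16-bit operand size => 24-bit base).
 * @endcode
 */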
7397
7398
7399/**
7400 * Stores a data dqword, SSE aligned.
7401 *
7402 * @returns Strict VBox status code.
7403 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7404 * @param iSegReg The index of the segment register to use for
7405 * this access. The base and limits are checked.
7406 * @param GCPtrMem The address of the guest memory.
7407 * @param u128Value The value to store.
7408 */
7409VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7410{
7411 /* The lazy approach for now... */
7412 uint8_t bUnmapInfo;
7413 PRTUINT128U pu128Dst;
7414 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7415 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7416 if (rc == VINF_SUCCESS)
7417 {
7418 pu128Dst->au64[0] = u128Value.au64[0];
7419 pu128Dst->au64[1] = u128Value.au64[1];
7420 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7421 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7422 }
7423 return rc;
7424}
7425
7426
7427#ifdef IEM_WITH_SETJMP
7428/**
7429 * Stores a data dqword, SSE aligned, longjmp on error.
7430 *
7431 * Raises \#GP(0) if not aligned.
7432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7433 * @param iSegReg The index of the segment register to use for
7434 * this access. The base and limits are checked.
7435 * @param GCPtrMem The address of the guest memory.
7436 * @param u128Value The value to store.
7437 */
7438void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7439 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7440{
7441 /* The lazy approach for now... */
7442 uint8_t bUnmapInfo;
7443 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7444 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7445 pu128Dst->au64[0] = u128Value.au64[0];
7446 pu128Dst->au64[1] = u128Value.au64[1];
7447 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7448 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7449}
7450#endif
7451
7452
7453/**
7454 * Stores a data qqword.
7455 *
7456 * @returns Strict VBox status code.
7457 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7458 * @param iSegReg The index of the segment register to use for
7459 * this access. The base and limits are checked.
7460 * @param GCPtrMem The address of the guest memory.
7461 * @param pu256Value Pointer to the value to store.
7462 */
7463VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7464{
7465 /* The lazy approach for now... */
7466 uint8_t bUnmapInfo;
7467 PRTUINT256U pu256Dst;
7468 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7469 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7470 if (rc == VINF_SUCCESS)
7471 {
7472 pu256Dst->au64[0] = pu256Value->au64[0];
7473 pu256Dst->au64[1] = pu256Value->au64[1];
7474 pu256Dst->au64[2] = pu256Value->au64[2];
7475 pu256Dst->au64[3] = pu256Value->au64[3];
7476 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7477 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7478 }
7479 return rc;
7480}
7481
7482
7483#ifdef IEM_WITH_SETJMP
7484/**
7485 * Stores a data qqword, longjmp on error.
7486 *
7487 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7488 * @param iSegReg The index of the segment register to use for
7489 * this access. The base and limits are checked.
7490 * @param GCPtrMem The address of the guest memory.
7491 * @param pu256Value Pointer to the value to store.
7492 */
7493void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7494{
7495 /* The lazy approach for now... */
7496 uint8_t bUnmapInfo;
7497 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7498 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7499 pu256Dst->au64[0] = pu256Value->au64[0];
7500 pu256Dst->au64[1] = pu256Value->au64[1];
7501 pu256Dst->au64[2] = pu256Value->au64[2];
7502 pu256Dst->au64[3] = pu256Value->au64[3];
7503 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7504 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7505}
7506#endif
7507
7508
7509/**
7510 * Stores a data qqword, AVX \#GP(0) aligned.
7511 *
7512 * @returns Strict VBox status code.
7513 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7514 * @param iSegReg The index of the segment register to use for
7515 * this access. The base and limits are checked.
7516 * @param GCPtrMem The address of the guest memory.
7517 * @param pu256Value Pointer to the value to store.
7518 */
7519VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7520{
7521 /* The lazy approach for now... */
7522 uint8_t bUnmapInfo;
7523 PRTUINT256U pu256Dst;
7524 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7525 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7526 if (rc == VINF_SUCCESS)
7527 {
7528 pu256Dst->au64[0] = pu256Value->au64[0];
7529 pu256Dst->au64[1] = pu256Value->au64[1];
7530 pu256Dst->au64[2] = pu256Value->au64[2];
7531 pu256Dst->au64[3] = pu256Value->au64[3];
7532 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7533 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7534 }
7535 return rc;
7536}
7537
7538
7539#ifdef IEM_WITH_SETJMP
7540/**
7541 * Stores a data qqword, AVX aligned, longjmp on error.
7542 *
7543 * Raises \#GP(0) if not aligned.
7544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7545 * @param iSegReg The index of the segment register to use for
7546 * this access. The base and limits are checked.
7547 * @param GCPtrMem The address of the guest memory.
7548 * @param pu256Value Pointer to the value to store.
7549 */
7550void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7551 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7552{
7553 /* The lazy approach for now... */
7554 uint8_t bUnmapInfo;
7555 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7556 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7557 pu256Dst->au64[0] = pu256Value->au64[0];
7558 pu256Dst->au64[1] = pu256Value->au64[1];
7559 pu256Dst->au64[2] = pu256Value->au64[2];
7560 pu256Dst->au64[3] = pu256Value->au64[3];
7561 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7562 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7563}
7564#endif
7565
7566
7567/**
7568 * Stores a descriptor register (sgdt, sidt).
7569 *
7570 * @returns Strict VBox status code.
7571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7572 * @param cbLimit The limit.
7573 * @param GCPtrBase The base address.
7574 * @param iSegReg The index of the segment register to use for
7575 * this access. The base and limits are checked.
7576 * @param GCPtrMem The address of the guest memory.
7577 */
7578VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7579{
7580 /*
7581     * The SIDT and SGDT instructions actually store the data using two
7582     * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7583     * do not respond to opsize prefixes.
7584 */
7585 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7586 if (rcStrict == VINF_SUCCESS)
7587 {
7588 if (IEM_IS_16BIT_CODE(pVCpu))
7589 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7590 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7591 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7592 else if (IEM_IS_32BIT_CODE(pVCpu))
7593 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7594 else
7595 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7596 }
7597 return rcStrict;
7598}
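
/**
 * @par Worked example (illustrative values only)
 * SGDT in 16-bit code with cbLimit=0x03ff and GCPtrBase=0x000fe000 writes the
 * word 0x03ff at GCPtrMem and a dword at GCPtrMem + 2: 0x000fe000 on a 386 or
 * later target CPU, but 0xff0fe000 on a 286 or earlier target (top byte forced
 * to 0xff).
 */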
7599
7600
7601/**
7602 * Begin a special stack push (used by interrupt, exceptions and such).
7603 *
7604 * This will raise \#SS or \#PF if appropriate.
7605 *
7606 * @returns Strict VBox status code.
7607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7608 * @param cbMem The number of bytes to push onto the stack.
7609 * @param cbAlign The alignment mask (7, 3, 1).
7610 * @param ppvMem Where to return the pointer to the stack memory.
7611 * As with the other memory functions this could be
7612 * direct access or bounce buffered access, so
7613 *                      don't commit registers until the commit call
7614 * succeeds.
7615 * @param pbUnmapInfo Where to store unmap info for
7616 * iemMemStackPushCommitSpecial.
7617 * @param puNewRsp Where to return the new RSP value. This must be
7618 * passed unchanged to
7619 * iemMemStackPushCommitSpecial().
7620 */
7621VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7622 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7623{
7624 Assert(cbMem < UINT8_MAX);
7625 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7626 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
7627}
7628
7629
7630/**
7631 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7632 *
7633 * This will update the rSP.
7634 *
7635 * @returns Strict VBox status code.
7636 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7637 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
7638 * @param uNewRsp The new RSP value returned by
7639 * iemMemStackPushBeginSpecial().
7640 */
7641VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
7642{
7643 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7644 if (rcStrict == VINF_SUCCESS)
7645 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7646 return rcStrict;
7647}
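
/**
 * @par Usage sketch (illustrative only, not a caller from this file)
 * Pushing an 8 byte value with the special push API; uValue stands in for
 * whatever the interrupt/exception code needs to push:
 * @code
 *     void        *pvStackMem;
 *     uint8_t      bUnmapInfo;
 *     uint64_t     uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, 7, &pvStackMem, &bUnmapInfo, &uNewRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *(uint64_t *)pvStackMem = uValue;  // direct or bounce buffered memory
 *         rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp); // commits and updates RSP
 *     }
 * @endcode
 */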
7648
7649
7650/**
7651 * Begin a special stack pop (used by iret, retf and such).
7652 *
7653 * This will raise \#SS or \#PF if appropriate.
7654 *
7655 * @returns Strict VBox status code.
7656 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7657 * @param cbMem The number of bytes to pop from the stack.
7658 * @param cbAlign The alignment mask (7, 3, 1).
7659 * @param ppvMem Where to return the pointer to the stack memory.
7660 * @param pbUnmapInfo Where to store unmap info for
7661 * iemMemStackPopDoneSpecial.
7662 * @param puNewRsp Where to return the new RSP value. This must be
7663 * assigned to CPUMCTX::rsp manually some time
7664 * after iemMemStackPopDoneSpecial() has been
7665 * called.
7666 */
7667VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7668 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7669{
7670 Assert(cbMem < UINT8_MAX);
7671 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
7672 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
7673}
7674
7675
7676/**
7677 * Continue a special stack pop (used by iret and retf), for the purpose of
7678 * retrieving a new stack pointer.
7679 *
7680 * This will raise \#SS or \#PF if appropriate.
7681 *
7682 * @returns Strict VBox status code.
7683 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7684 * @param off Offset from the top of the stack. This is zero
7685 * except in the retf case.
7686 * @param cbMem The number of bytes to pop from the stack.
7687 * @param ppvMem Where to return the pointer to the stack memory.
7688 * @param pbUnmapInfo Where to store unmap info for
7689 * iemMemStackPopDoneSpecial.
7690 * @param uCurNewRsp The current uncommitted RSP value. (No need to
7691 * return this because all use of this function is
7692 * to retrieve a new value and anything we return
7693 * here would be discarded.)
7694 */
7695VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
7696 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
7697{
7698 Assert(cbMem < UINT8_MAX);
7699
7700    /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
7701 RTGCPTR GCPtrTop;
7702 if (IEM_IS_64BIT_CODE(pVCpu))
7703 GCPtrTop = uCurNewRsp;
7704 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7705 GCPtrTop = (uint32_t)uCurNewRsp;
7706 else
7707 GCPtrTop = (uint16_t)uCurNewRsp;
7708
7709 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
7710 0 /* checked in iemMemStackPopBeginSpecial */);
7711}
7712
7713
7714/**
7715 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7716 * iemMemStackPopContinueSpecial).
7717 *
7718 * The caller will manually commit the rSP.
7719 *
7720 * @returns Strict VBox status code.
7721 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7722 * @param bUnmapInfo Unmap information returned by
7723 * iemMemStackPopBeginSpecial() or
7724 * iemMemStackPopContinueSpecial().
7725 */
7726VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7727{
7728 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7729}
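
/**
 * @par Usage sketch (illustrative only, not a caller from this file)
 * Popping an 8 byte value with the special pop API; RSP is only committed by
 * the caller after the popped value has been validated:
 * @code
 *     void const  *pvStackMem;
 *     uint8_t      bUnmapInfo;
 *     uint64_t     uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, 7, &pvStackMem, &bUnmapInfo, &uNewRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         uint64_t const uValue = *(uint64_t const *)pvStackMem;
 *         rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
 *         if (rcStrict == VINF_SUCCESS)
 *         {
 *             // ... validate uValue ...
 *             pVCpu->cpum.GstCtx.rsp = uNewRsp;   // manual RSP commit
 *         }
 *     }
 * @endcode
 */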
7730
7731
7732/**
7733 * Fetches a system table byte.
7734 *
7735 * @returns Strict VBox status code.
7736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7737 * @param pbDst Where to return the byte.
7738 * @param iSegReg The index of the segment register to use for
7739 * this access. The base and limits are checked.
7740 * @param GCPtrMem The address of the guest memory.
7741 */
7742VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7743{
7744 /* The lazy approach for now... */
7745 uint8_t bUnmapInfo;
7746 uint8_t const *pbSrc;
7747 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7748 if (rc == VINF_SUCCESS)
7749 {
7750 *pbDst = *pbSrc;
7751 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7752 }
7753 return rc;
7754}
7755
7756
7757/**
7758 * Fetches a system table word.
7759 *
7760 * @returns Strict VBox status code.
7761 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7762 * @param pu16Dst Where to return the word.
7763 * @param iSegReg The index of the segment register to use for
7764 * this access. The base and limits are checked.
7765 * @param GCPtrMem The address of the guest memory.
7766 */
7767VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7768{
7769 /* The lazy approach for now... */
7770 uint8_t bUnmapInfo;
7771 uint16_t const *pu16Src;
7772 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7773 if (rc == VINF_SUCCESS)
7774 {
7775 *pu16Dst = *pu16Src;
7776 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7777 }
7778 return rc;
7779}
7780
7781
7782/**
7783 * Fetches a system table dword.
7784 *
7785 * @returns Strict VBox status code.
7786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7787 * @param pu32Dst Where to return the dword.
7788 * @param iSegReg The index of the segment register to use for
7789 * this access. The base and limits are checked.
7790 * @param GCPtrMem The address of the guest memory.
7791 */
7792VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7793{
7794 /* The lazy approach for now... */
7795 uint8_t bUnmapInfo;
7796 uint32_t const *pu32Src;
7797 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7798 if (rc == VINF_SUCCESS)
7799 {
7800 *pu32Dst = *pu32Src;
7801 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7802 }
7803 return rc;
7804}
7805
7806
7807/**
7808 * Fetches a system table qword.
7809 *
7810 * @returns Strict VBox status code.
7811 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7812 * @param pu64Dst Where to return the qword.
7813 * @param iSegReg The index of the segment register to use for
7814 * this access. The base and limits are checked.
7815 * @param GCPtrMem The address of the guest memory.
7816 */
7817VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7818{
7819 /* The lazy approach for now... */
7820 uint8_t bUnmapInfo;
7821 uint64_t const *pu64Src;
7822 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7823 if (rc == VINF_SUCCESS)
7824 {
7825 *pu64Dst = *pu64Src;
7826 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7827 }
7828 return rc;
7829}
7830
7831
7832/**
7833 * Fetches a descriptor table entry with caller specified error code.
7834 *
7835 * @returns Strict VBox status code.
7836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7837 * @param pDesc Where to return the descriptor table entry.
7838 * @param uSel The selector which table entry to fetch.
7839 * @param uXcpt The exception to raise on table lookup error.
7840 * @param uErrorCode The error code associated with the exception.
7841 */
7842static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
7843 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
7844{
7845 AssertPtr(pDesc);
7846 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
7847
7848 /** @todo did the 286 require all 8 bytes to be accessible? */
7849 /*
7850 * Get the selector table base and check bounds.
7851 */
7852 RTGCPTR GCPtrBase;
7853 if (uSel & X86_SEL_LDT)
7854 {
7855 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
7856 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
7857 {
7858 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
7859 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
7860 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7861 uErrorCode, 0);
7862 }
7863
7864 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
7865 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
7866 }
7867 else
7868 {
7869 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
7870 {
7871 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
7872 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7873 uErrorCode, 0);
7874 }
7875 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
7876 }
7877
7878 /*
7879 * Read the legacy descriptor and maybe the long mode extensions if
7880 * required.
7881 */
7882 VBOXSTRICTRC rcStrict;
7883 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
7884 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
7885 else
7886 {
7887 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
7888 if (rcStrict == VINF_SUCCESS)
7889 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
7890 if (rcStrict == VINF_SUCCESS)
7891 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
7892 if (rcStrict == VINF_SUCCESS)
7893 pDesc->Legacy.au16[3] = 0;
7894 else
7895 return rcStrict;
7896 }
7897
7898 if (rcStrict == VINF_SUCCESS)
7899 {
7900 if ( !IEM_IS_LONG_MODE(pVCpu)
7901 || pDesc->Legacy.Gen.u1DescType)
7902 pDesc->Long.au64[1] = 0;
7903 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
7904 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
7905 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
7906 else
7907 {
7908 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
7909 /** @todo is this the right exception? */
7910 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
7911 }
7912 }
7913 return rcStrict;
7914}
7915
7916
7917/**
7918 * Fetches a descriptor table entry.
7919 *
7920 * @returns Strict VBox status code.
7921 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7922 * @param pDesc Where to return the descriptor table entry.
7923 * @param uSel The selector which table entry to fetch.
7924 * @param uXcpt The exception to raise on table lookup error.
7925 */
7926VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
7927{
7928 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
7929}
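
/**
 * @par Worked example (illustrative only)
 * For uSel=0x002b (TI=0 so GDT, RPL=3) the bounds check in
 * iemMemFetchSelDescWithErr requires gdtr.cbGdt >= (0x002b | X86_SEL_RPL_LDT)
 * = 0x2f, and the 8-byte entry is read from gdtr.pGdt + (0x002b & X86_SEL_MASK)
 * = gdtr.pGdt + 0x28.  A hypothetical call on a \#GP path could look like:
 * @code
 *     IEMSELDESC Desc;
 *     VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 * @endcode
 */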
7930
7931
7932/**
7933 * Marks the selector descriptor as accessed (only non-system descriptors).
7934 *
7935 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
7936 * will therefore skip the limit checks.
7937 *
7938 * @returns Strict VBox status code.
7939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7940 * @param uSel The selector.
7941 */
7942VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
7943{
7944 /*
7945 * Get the selector table base and calculate the entry address.
7946 */
7947 RTGCPTR GCPtr = uSel & X86_SEL_LDT
7948 ? pVCpu->cpum.GstCtx.ldtr.u64Base
7949 : pVCpu->cpum.GstCtx.gdtr.pGdt;
7950 GCPtr += uSel & X86_SEL_MASK;
7951
7952 /*
7953     * ASMAtomicBitSet will assert if the address is misaligned, so do some
7954     * ugly stuff to avoid this.  This also makes sure the access is atomic and
7955     * more or less removes any question about 8-bit vs 32-bit accesses.
7956 */
7957 VBOXSTRICTRC rcStrict;
7958 uint8_t bUnmapInfo;
7959 uint32_t volatile *pu32;
7960 if ((GCPtr & 3) == 0)
7961 {
7962 /* The normal case, map the 32-bit bits around the accessed bit (40). */
7963 GCPtr += 2 + 2;
7964 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
7965 if (rcStrict != VINF_SUCCESS)
7966 return rcStrict;
7967        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
7968 }
7969 else
7970 {
7971 /* The misaligned GDT/LDT case, map the whole thing. */
7972 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
7973 if (rcStrict != VINF_SUCCESS)
7974 return rcStrict;
7975 switch ((uintptr_t)pu32 & 3)
7976 {
7977 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
7978 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
7979 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
7980 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
7981 }
7982 }
7983
7984 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7985}
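
/**
 * @par Bit position example (illustrative only)
 * The accessed flag is bit 0 of the type field in descriptor byte 5, i.e. bit
 * 40 of the 8-byte entry.  In the aligned case above the dword at offset 4 is
 * mapped, so bit 40 - 32 = 8 is set; in the misaligned case with, say,
 * ((uintptr_t)pu32 & 3) == 2, the byte pointer is advanced by 2 and bit
 * 40 - 16 = 24 is set, which is the same descriptor bit.
 */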
7986
7987
7988#undef LOG_GROUP
7989#define LOG_GROUP LOG_GROUP_IEM
7990
7991/** @} */
7992
7993/** @name Opcode Helpers.
7994 * @{
7995 */
7996
7997/**
7998 * Calculates the effective address of a ModR/M memory operand.
7999 *
8000 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8001 *
8002 * @return Strict VBox status code.
8003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8004 * @param bRm The ModRM byte.
8005 * @param cbImmAndRspOffset - First byte: The size of any immediate
8006 * following the effective address opcode bytes
8007 * (only for RIP relative addressing).
8008 * - Second byte: RSP displacement (for POP [ESP]).
8009 * @param pGCPtrEff Where to return the effective address.
8010 */
8011VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8012{
8013 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8014# define SET_SS_DEF() \
8015 do \
8016 { \
8017 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8018 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8019 } while (0)
8020
8021 if (!IEM_IS_64BIT_CODE(pVCpu))
8022 {
8023/** @todo Check the effective address size crap! */
8024 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8025 {
8026 uint16_t u16EffAddr;
8027
8028 /* Handle the disp16 form with no registers first. */
8029 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8030 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8031 else
8032 {
8033                /* Get the displacement. */
8034 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8035 {
8036 case 0: u16EffAddr = 0; break;
8037 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8038 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8039 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8040 }
8041
8042 /* Add the base and index registers to the disp. */
8043 switch (bRm & X86_MODRM_RM_MASK)
8044 {
8045 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8046 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8047 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8048 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8049 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8050 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8051 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8052 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8053 }
8054 }
8055
8056 *pGCPtrEff = u16EffAddr;
8057 }
8058 else
8059 {
8060 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8061 uint32_t u32EffAddr;
8062
8063 /* Handle the disp32 form with no registers first. */
8064 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8065 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8066 else
8067 {
8068 /* Get the register (or SIB) value. */
8069 switch ((bRm & X86_MODRM_RM_MASK))
8070 {
8071 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8072 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8073 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8074 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8075 case 4: /* SIB */
8076 {
8077 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8078
8079 /* Get the index and scale it. */
8080 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8081 {
8082 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8083 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8084 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8085 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8086 case 4: u32EffAddr = 0; /*none */ break;
8087 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8088 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8089 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8090 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8091 }
8092 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8093
8094 /* add base */
8095 switch (bSib & X86_SIB_BASE_MASK)
8096 {
8097 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8098 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8099 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8100 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8101 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8102 case 5:
8103 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8104 {
8105 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8106 SET_SS_DEF();
8107 }
8108 else
8109 {
8110 uint32_t u32Disp;
8111 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8112 u32EffAddr += u32Disp;
8113 }
8114 break;
8115 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8116 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8117 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8118 }
8119 break;
8120 }
8121 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8122 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8123 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8124 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8125 }
8126
8127 /* Get and add the displacement. */
8128 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8129 {
8130 case 0:
8131 break;
8132 case 1:
8133 {
8134 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8135 u32EffAddr += i8Disp;
8136 break;
8137 }
8138 case 2:
8139 {
8140 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8141 u32EffAddr += u32Disp;
8142 break;
8143 }
8144 default:
8145 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8146 }
8147
8148 }
8149 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8150 *pGCPtrEff = u32EffAddr;
8151 }
8152 }
8153 else
8154 {
8155 uint64_t u64EffAddr;
8156
8157 /* Handle the rip+disp32 form with no registers first. */
8158 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8159 {
8160 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8161 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8162 }
8163 else
8164 {
8165 /* Get the register (or SIB) value. */
8166 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8167 {
8168 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8169 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8170 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8171 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8172 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8173 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8174 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8175 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8176 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8177 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8178 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8179 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8180 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8181 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8182 /* SIB */
8183 case 4:
8184 case 12:
8185 {
8186 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8187
8188 /* Get the index and scale it. */
8189 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8190 {
8191 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8192 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8193 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8194 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8195 case 4: u64EffAddr = 0; /*none */ break;
8196 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8197 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8198 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8199 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8200 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8201 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8202 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8203 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8204 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8205 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8206 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8207 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8208 }
8209 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8210
8211 /* add base */
8212 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8213 {
8214 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8215 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8216 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8217 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8218 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8219 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8220 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8221 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8222 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8223 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8224 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8225 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8226 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8227 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8228 /* complicated encodings */
8229 case 5:
8230 case 13:
8231 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8232 {
8233 if (!pVCpu->iem.s.uRexB)
8234 {
8235 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8236 SET_SS_DEF();
8237 }
8238 else
8239 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8240 }
8241 else
8242 {
8243 uint32_t u32Disp;
8244 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8245 u64EffAddr += (int32_t)u32Disp;
8246 }
8247 break;
8248 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8249 }
8250 break;
8251 }
8252 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8253 }
8254
8255 /* Get and add the displacement. */
8256 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8257 {
8258 case 0:
8259 break;
8260 case 1:
8261 {
8262 int8_t i8Disp;
8263 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8264 u64EffAddr += i8Disp;
8265 break;
8266 }
8267 case 2:
8268 {
8269 uint32_t u32Disp;
8270 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8271 u64EffAddr += (int32_t)u32Disp;
8272 break;
8273 }
8274 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8275 }
8276
8277 }
8278
8279 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8280 *pGCPtrEff = u64EffAddr;
8281 else
8282 {
8283 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8284 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8285 }
8286 }
8287
8288 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8289 return VINF_SUCCESS;
8290}
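
/**
 * @par Decoding examples (illustrative only)
 * In 32-bit addressing, bRm=0x44 (mod=01, rm=100) followed by the SIB byte
 * 0x88 (scale=4, index=ECX, base=EAX) and a disp8 yields
 * *pGCPtrEff = EAX + ECX*4 + (int8_t)disp8.  In 16-bit addressing, bRm=0x46
 * (mod=01, rm=110) yields BP + disp8 and, absent a segment prefix, switches
 * the effective segment to SS via SET_SS_DEF().
 */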
8291
8292
8293#ifdef IEM_WITH_SETJMP
8294/**
8295 * Calculates the effective address of a ModR/M memory operand.
8296 *
8297 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8298 *
8299 * May longjmp on internal error.
8300 *
8301 * @return The effective address.
8302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8303 * @param bRm The ModRM byte.
8304 * @param cbImmAndRspOffset - First byte: The size of any immediate
8305 * following the effective address opcode bytes
8306 * (only for RIP relative addressing).
8307 * - Second byte: RSP displacement (for POP [ESP]).
8308 */
8309RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8310{
8311 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8312# define SET_SS_DEF() \
8313 do \
8314 { \
8315 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8316 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8317 } while (0)
8318
8319 if (!IEM_IS_64BIT_CODE(pVCpu))
8320 {
8321/** @todo Check the effective address size crap! */
8322 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8323 {
8324 uint16_t u16EffAddr;
8325
8326 /* Handle the disp16 form with no registers first. */
8327 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8328 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8329 else
8330 {
8331                /* Get the displacement. */
8332 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8333 {
8334 case 0: u16EffAddr = 0; break;
8335 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8336 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8337 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8338 }
8339
8340 /* Add the base and index registers to the disp. */
8341 switch (bRm & X86_MODRM_RM_MASK)
8342 {
8343 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8344 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8345 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8346 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8347 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8348 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8349 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8350 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8351 }
8352 }
8353
8354 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8355 return u16EffAddr;
8356 }
8357
8358 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8359 uint32_t u32EffAddr;
8360
8361 /* Handle the disp32 form with no registers first. */
8362 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8363 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8364 else
8365 {
8366 /* Get the register (or SIB) value. */
8367 switch ((bRm & X86_MODRM_RM_MASK))
8368 {
8369 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8370 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8371 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8372 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8373 case 4: /* SIB */
8374 {
8375 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8376
8377 /* Get the index and scale it. */
8378 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8379 {
8380 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8381 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8382 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8383 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8384 case 4: u32EffAddr = 0; /*none */ break;
8385 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8386 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8387 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8388 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8389 }
8390 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8391
8392 /* add base */
8393 switch (bSib & X86_SIB_BASE_MASK)
8394 {
8395 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8396 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8397 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8398 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8399 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8400 case 5:
8401 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8402 {
8403 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8404 SET_SS_DEF();
8405 }
8406 else
8407 {
8408 uint32_t u32Disp;
8409 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8410 u32EffAddr += u32Disp;
8411 }
8412 break;
8413 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8414 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8415 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8416 }
8417 break;
8418 }
8419 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8420 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8421 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8422 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8423 }
8424
8425 /* Get and add the displacement. */
8426 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8427 {
8428 case 0:
8429 break;
8430 case 1:
8431 {
8432 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8433 u32EffAddr += i8Disp;
8434 break;
8435 }
8436 case 2:
8437 {
8438 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8439 u32EffAddr += u32Disp;
8440 break;
8441 }
8442 default:
8443 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8444 }
8445 }
8446
8447 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8448 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8449 return u32EffAddr;
8450 }
8451
8452 uint64_t u64EffAddr;
8453
8454 /* Handle the rip+disp32 form with no registers first. */
8455 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8456 {
8457 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8458 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8459 }
8460 else
8461 {
8462 /* Get the register (or SIB) value. */
8463 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8464 {
8465 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8466 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8467 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8468 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8469 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8470 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8471 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8472 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8473 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8474 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8475 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8476 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8477 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8478 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8479 /* SIB */
8480 case 4:
8481 case 12:
8482 {
8483 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8484
8485 /* Get the index and scale it. */
8486 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8487 {
8488 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8489 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8490 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8491 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8492 case 4: u64EffAddr = 0; /*none */ break;
8493 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8494 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8495 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8496 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8497 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8498 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8499 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8500 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8501 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8502 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8503 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8504 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8505 }
8506 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8507
8508 /* add base */
8509 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8510 {
8511 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8512 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8513 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8514 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8515 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8516 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8517 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8518 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8519 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8520 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8521 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8522 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8523 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8524 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8525 /* complicated encodings */
8526 case 5:
8527 case 13:
8528 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8529 {
8530 if (!pVCpu->iem.s.uRexB)
8531 {
8532 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8533 SET_SS_DEF();
8534 }
8535 else
8536 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8537 }
8538 else
8539 {
8540 uint32_t u32Disp;
8541 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8542 u64EffAddr += (int32_t)u32Disp;
8543 }
8544 break;
8545 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8546 }
8547 break;
8548 }
8549 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8550 }
8551
8552 /* Get and add the displacement. */
8553 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8554 {
8555 case 0:
8556 break;
8557 case 1:
8558 {
8559 int8_t i8Disp;
8560 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8561 u64EffAddr += i8Disp;
8562 break;
8563 }
8564 case 2:
8565 {
8566 uint32_t u32Disp;
8567 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8568 u64EffAddr += (int32_t)u32Disp;
8569 break;
8570 }
8571 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8572 }
8573
8574 }
8575
8576 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8577 {
8578 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8579 return u64EffAddr;
8580 }
8581 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8582 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8583 return u64EffAddr & UINT32_MAX;
8584}
8585#endif /* IEM_WITH_SETJMP */
8586
8587
8588/**
8589 * Calculates the effective address of a ModR/M memory operand, extended version
8590 * for use in the recompilers.
8591 *
8592 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8593 *
8594 * @return Strict VBox status code.
8595 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8596 * @param bRm The ModRM byte.
8597 * @param cbImmAndRspOffset - First byte: The size of any immediate
8598 * following the effective address opcode bytes
8599 * (only for RIP relative addressing).
8600 * - Second byte: RSP displacement (for POP [ESP]).
8601 * @param pGCPtrEff Where to return the effective address.
8602 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8603 * SIB byte (bits 39:32).
8604 */
8605VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8606{
8607    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8608# define SET_SS_DEF() \
8609 do \
8610 { \
8611 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8612 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8613 } while (0)
8614
8615 uint64_t uInfo;
8616 if (!IEM_IS_64BIT_CODE(pVCpu))
8617 {
8618/** @todo Check the effective address size crap! */
8619 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8620 {
8621 uint16_t u16EffAddr;
8622
8623 /* Handle the disp16 form with no registers first. */
8624 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8625 {
8626 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8627 uInfo = u16EffAddr;
8628 }
8629 else
8630 {
8631                /* Get the displacement. */
8632 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8633 {
8634 case 0: u16EffAddr = 0; break;
8635 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8636 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8637 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8638 }
8639 uInfo = u16EffAddr;
8640
8641 /* Add the base and index registers to the disp. */
8642 switch (bRm & X86_MODRM_RM_MASK)
8643 {
8644 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8645 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8646 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8647 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8648 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8649 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8650 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8651 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8652 }
8653 }
8654
8655 *pGCPtrEff = u16EffAddr;
8656 }
8657 else
8658 {
8659 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8660 uint32_t u32EffAddr;
8661
8662 /* Handle the disp32 form with no registers first. */
8663 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8664 {
8665 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8666 uInfo = u32EffAddr;
8667 }
8668 else
8669 {
8670 /* Get the register (or SIB) value. */
8671 uInfo = 0;
8672 switch ((bRm & X86_MODRM_RM_MASK))
8673 {
8674 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8675 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8676 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8677 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8678 case 4: /* SIB */
8679 {
8680 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8681 uInfo = (uint64_t)bSib << 32;
8682
8683 /* Get the index and scale it. */
8684 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8685 {
8686 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8687 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8688 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8689 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8690 case 4: u32EffAddr = 0; /*none */ break;
8691 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8692 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8693 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8694 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8695 }
8696 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8697
8698 /* add base */
8699 switch (bSib & X86_SIB_BASE_MASK)
8700 {
8701 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8702 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8703 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8704 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8705 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8706 case 5:
8707 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8708 {
8709 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8710 SET_SS_DEF();
8711 }
8712 else
8713 {
8714 uint32_t u32Disp;
8715 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8716 u32EffAddr += u32Disp;
8717 uInfo |= u32Disp;
8718 }
8719 break;
8720 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8721 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8722 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8723 }
8724 break;
8725 }
8726 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8727 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8728 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8729 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8730 }
8731
8732 /* Get and add the displacement. */
8733 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8734 {
8735 case 0:
8736 break;
8737 case 1:
8738 {
8739 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8740 u32EffAddr += i8Disp;
8741 uInfo |= (uint32_t)(int32_t)i8Disp;
8742 break;
8743 }
8744 case 2:
8745 {
8746 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8747 u32EffAddr += u32Disp;
8748 uInfo |= (uint32_t)u32Disp;
8749 break;
8750 }
8751 default:
8752 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8753 }
8754
8755 }
8756 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8757 *pGCPtrEff = u32EffAddr;
8758 }
8759 }
8760 else
8761 {
8762 uint64_t u64EffAddr;
8763
8764 /* Handle the rip+disp32 form with no registers first. */
8765 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8766 {
8767 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8768 uInfo = (uint32_t)u64EffAddr;
8769 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8770 }
8771 else
8772 {
8773 /* Get the register (or SIB) value. */
8774 uInfo = 0;
8775 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8776 {
8777 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8778 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8779 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8780 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8781 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8782 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8783 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8784 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8785 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8786 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8787 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8788 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8789 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8790 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8791 /* SIB */
8792 case 4:
8793 case 12:
8794 {
8795 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8796 uInfo = (uint64_t)bSib << 32;
8797
8798 /* Get the index and scale it. */
8799 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8800 {
8801 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8802 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8803 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8804 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8805 case 4: u64EffAddr = 0; /*none */ break;
8806 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8807 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8808 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8809 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8810 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8811 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8812 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8813 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8814 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8815 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8816 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8817 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8818 }
8819 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8820
8821 /* add base */
8822 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8823 {
8824 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8825 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8826 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8827 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8828 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8829 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8830 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8831 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8832 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8833 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8834 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8835 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8836 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8837 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8838 /* complicated encodings */
8839 case 5:
8840 case 13:
8841 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8842 {
8843 if (!pVCpu->iem.s.uRexB)
8844 {
8845 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8846 SET_SS_DEF();
8847 }
8848 else
8849 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8850 }
8851 else
8852 {
8853 uint32_t u32Disp;
8854 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8855 u64EffAddr += (int32_t)u32Disp;
8856 uInfo |= u32Disp;
8857 }
8858 break;
8859 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8860 }
8861 break;
8862 }
8863 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8864 }
8865
8866 /* Get and add the displacement. */
8867 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8868 {
8869 case 0:
8870 break;
8871 case 1:
8872 {
8873 int8_t i8Disp;
8874 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8875 u64EffAddr += i8Disp;
8876 uInfo |= (uint32_t)(int32_t)i8Disp;
8877 break;
8878 }
8879 case 2:
8880 {
8881 uint32_t u32Disp;
8882 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8883 u64EffAddr += (int32_t)u32Disp;
8884 uInfo |= u32Disp;
8885 break;
8886 }
8887 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8888 }
8889
8890 }
8891
8892 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8893 *pGCPtrEff = u64EffAddr;
8894 else
8895 {
8896 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8897 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8898 }
8899 }
8900 *puInfo = uInfo;
8901
8902 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
8903 return VINF_SUCCESS;
8904}
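
/*
 * Worked example for the 64-bit SIB decoding above (illustrative, hypothetical
 * encoding): with mod=1, rm=4, a SIB byte selecting scale=2, index=1 (RCX) and
 * base=0 (RAX), followed by a disp8 of 8, the code computes:
 *
 *      u64EffAddr   = pVCpu->cpum.GstCtx.rcx;     // index
 *      u64EffAddr <<= 2;                          // scale factor of 4
 *      u64EffAddr  += pVCpu->cpum.GstCtx.rax;     // base
 *      u64EffAddr  += 8;                          // sign-extended disp8
 *
 * i.e. EffAddr = RAX + RCX*4 + 8, truncated to 32 bits when the effective
 * address mode is IEMMODE_32BIT.
 */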
8905
8906/** @} */
8907
8908
8909#ifdef LOG_ENABLED
8910/**
8911 * Logs the current instruction.
8912 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
8913 * @param fSameCtx Set if we have the same context information as the VMM,
8914 * clear if we may have already executed an instruction in
8915 * our debug context. When clear, we assume IEMCPU holds
8916 * valid CPU mode info.
8917 *
8918 * The @a fSameCtx parameter is now misleading and obsolete.
8919 * @param pszFunction The IEM function doing the execution.
8920 */
8921static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
8922{
8923# ifdef IN_RING3
8924 if (LogIs2Enabled())
8925 {
8926 char szInstr[256];
8927 uint32_t cbInstr = 0;
8928 if (fSameCtx)
8929 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
8930 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
8931 szInstr, sizeof(szInstr), &cbInstr);
8932 else
8933 {
8934 uint32_t fFlags = 0;
8935 switch (IEM_GET_CPU_MODE(pVCpu))
8936 {
8937 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
8938 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
8939 case IEMMODE_16BIT:
8940 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
8941 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
8942 else
8943 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
8944 break;
8945 }
8946 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
8947 szInstr, sizeof(szInstr), &cbInstr);
8948 }
8949
8950 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8951 Log2(("**** %s fExec=%x\n"
8952 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
8953 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
8954 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
8955 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
8956 " %s\n"
8957 , pszFunction, pVCpu->iem.s.fExec,
8958 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
8959 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
8960 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
8961 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
8962 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
8963 szInstr));
8964
8965 /* This stuff sucks atm. as it fills the log with MSRs. */
8966 //if (LogIs3Enabled())
8967 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
8968 }
8969 else
8970# endif
8971 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
8972 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
8973 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
8974}
8975#endif /* LOG_ENABLED */
8976
8977
8978#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8979/**
8980 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
8981 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
8982 *
8983 * @returns Modified rcStrict.
8984 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8985 * @param rcStrict The instruction execution status.
8986 */
8987static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
8988{
8989 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
8990 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
8991 {
8992 /* VMX preemption timer takes priority over NMI-window exits. */
8993 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
8994 {
8995 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
8996 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
8997 }
8998 /*
8999 * Check remaining intercepts.
9000 *
9001 * NMI-window and Interrupt-window VM-exits.
9002 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9003 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9004 *
9005 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9006 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9007 */
9008 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9009 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9010 && !TRPMHasTrap(pVCpu))
9011 {
9012 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9013 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9014 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9015 {
9016 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9017 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9018 }
9019 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9020 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9021 {
9022 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9023 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9024 }
9025 }
9026 }
9027 /* TPR-below threshold/APIC write has the highest priority. */
9028 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9029 {
9030 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9031 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9032 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9033 }
9034 /* MTF takes priority over VMX-preemption timer. */
9035 else
9036 {
9037 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9038 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9039 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9040 }
9041 return rcStrict;
9042}
9043#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9044
9045
9046/**
9047 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9048 * IEMExecOneWithPrefetchedByPC.
9049 *
9050 * Similar code is found in IEMExecLots.
9051 *
9052 * @return Strict VBox status code.
9053 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9054 * @param fExecuteInhibit If set, execute the instruction following CLI,
9055 * POP SS and MOV SS,GR.
9056 * @param pszFunction The calling function name.
9057 */
9058DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9059{
9060 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9061 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9062 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9063 RT_NOREF_PV(pszFunction);
9064
9065#ifdef IEM_WITH_SETJMP
9066 VBOXSTRICTRC rcStrict;
9067 IEM_TRY_SETJMP(pVCpu, rcStrict)
9068 {
9069 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9070 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9071 }
9072 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9073 {
9074 pVCpu->iem.s.cLongJumps++;
9075 }
9076 IEM_CATCH_LONGJMP_END(pVCpu);
9077#else
9078 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9079 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9080#endif
9081 if (rcStrict == VINF_SUCCESS)
9082 pVCpu->iem.s.cInstructions++;
9083 if (pVCpu->iem.s.cActiveMappings > 0)
9084 {
9085 Assert(rcStrict != VINF_SUCCESS);
9086 iemMemRollback(pVCpu);
9087 }
9088 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9089 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9090 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9091
9092//#ifdef DEBUG
9093// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9094//#endif
9095
9096#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9097 /*
9098 * Perform any VMX nested-guest instruction boundary actions.
9099 *
9100 * If any of these causes a VM-exit, we must skip executing the next
9101 * instruction (would run into stale page tables). A VM-exit makes sure
9102     * there is no interrupt-inhibition, so that should ensure we don't try
9103     * to execute the next instruction.  Clearing fExecuteInhibit is
9104 * problematic because of the setjmp/longjmp clobbering above.
9105 */
9106 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9107 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9108 || rcStrict != VINF_SUCCESS)
9109 { /* likely */ }
9110 else
9111 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9112#endif
9113
9114 /* Execute the next instruction as well if a cli, pop ss or
9115 mov ss, Gr has just completed successfully. */
9116 if ( fExecuteInhibit
9117 && rcStrict == VINF_SUCCESS
9118 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9119 {
9120 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9121 if (rcStrict == VINF_SUCCESS)
9122 {
9123#ifdef LOG_ENABLED
9124 iemLogCurInstr(pVCpu, false, pszFunction);
9125#endif
9126#ifdef IEM_WITH_SETJMP
9127 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9128 {
9129 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9130 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9131 }
9132 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9133 {
9134 pVCpu->iem.s.cLongJumps++;
9135 }
9136 IEM_CATCH_LONGJMP_END(pVCpu);
9137#else
9138 IEM_OPCODE_GET_FIRST_U8(&b);
9139 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9140#endif
9141 if (rcStrict == VINF_SUCCESS)
9142 {
9143 pVCpu->iem.s.cInstructions++;
9144#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9145 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9146 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9147 { /* likely */ }
9148 else
9149 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9150#endif
9151 }
9152 if (pVCpu->iem.s.cActiveMappings > 0)
9153 {
9154 Assert(rcStrict != VINF_SUCCESS);
9155 iemMemRollback(pVCpu);
9156 }
9157 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9158 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9159 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9160 }
9161 else if (pVCpu->iem.s.cActiveMappings > 0)
9162 iemMemRollback(pVCpu);
9163 /** @todo drop this after we bake this change into RIP advancing. */
9164 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9165 }
9166
9167 /*
9168 * Return value fiddling, statistics and sanity assertions.
9169 */
9170 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9171
9172 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9173 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9174 return rcStrict;
9175}
9176
9177
9178/**
9179 * Execute one instruction.
9180 *
9181 * @return Strict VBox status code.
9182 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9183 */
9184VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9185{
9186    AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9187#ifdef LOG_ENABLED
9188 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9189#endif
9190
9191 /*
9192 * Do the decoding and emulation.
9193 */
9194 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9195 if (rcStrict == VINF_SUCCESS)
9196 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9197 else if (pVCpu->iem.s.cActiveMappings > 0)
9198 iemMemRollback(pVCpu);
9199
9200 if (rcStrict != VINF_SUCCESS)
9201 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9202 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9203 return rcStrict;
9204}
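
/*
 * Usage sketch (hypothetical caller, illustrative only): emulate a single
 * instruction on the calling EMT and pass any non-success strict status code
 * on to the caller.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *      if (rcStrict != VINF_SUCCESS)
 *          Log(("IEMExecOne -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 */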
9205
9206
9207VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9208{
9209 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9210 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9211 if (rcStrict == VINF_SUCCESS)
9212 {
9213 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9214 if (pcbWritten)
9215 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9216 }
9217 else if (pVCpu->iem.s.cActiveMappings > 0)
9218 iemMemRollback(pVCpu);
9219
9220 return rcStrict;
9221}
9222
9223
9224VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9225 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9226{
9227 VBOXSTRICTRC rcStrict;
9228 if ( cbOpcodeBytes
9229 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9230 {
9231 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9232#ifdef IEM_WITH_CODE_TLB
9233 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9234 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9235 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9236 pVCpu->iem.s.offCurInstrStart = 0;
9237 pVCpu->iem.s.offInstrNextByte = 0;
9238 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9239#else
9240 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9241 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9242#endif
9243 rcStrict = VINF_SUCCESS;
9244 }
9245 else
9246 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9247 if (rcStrict == VINF_SUCCESS)
9248 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9249 else if (pVCpu->iem.s.cActiveMappings > 0)
9250 iemMemRollback(pVCpu);
9251
9252 return rcStrict;
9253}
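
/*
 * Usage sketch (hypothetical caller, illustrative bytes): when the opcode
 * bytes are already at hand (e.g. from exit information), they can be passed
 * in to save the initial opcode fetch; they are only used while the guest RIP
 * still equals OpcodeBytesPC.
 *
 *      static uint8_t const s_abCpuid[] = { 0x0f, 0xa2 };     // CPUID
 *      VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip,
 *                                                           s_abCpuid, sizeof(s_abCpuid));
 */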
9254
9255
9256VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9257{
9258 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9259 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9260 if (rcStrict == VINF_SUCCESS)
9261 {
9262 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9263 if (pcbWritten)
9264 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9265 }
9266 else if (pVCpu->iem.s.cActiveMappings > 0)
9267 iemMemRollback(pVCpu);
9268
9269 return rcStrict;
9270}
9271
9272
9273VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9274 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9275{
9276 VBOXSTRICTRC rcStrict;
9277 if ( cbOpcodeBytes
9278 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9279 {
9280 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9281#ifdef IEM_WITH_CODE_TLB
9282 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9283 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9284 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9285 pVCpu->iem.s.offCurInstrStart = 0;
9286 pVCpu->iem.s.offInstrNextByte = 0;
9287 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9288#else
9289 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9290 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9291#endif
9292 rcStrict = VINF_SUCCESS;
9293 }
9294 else
9295 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9296 if (rcStrict == VINF_SUCCESS)
9297 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9298 else if (pVCpu->iem.s.cActiveMappings > 0)
9299 iemMemRollback(pVCpu);
9300
9301 return rcStrict;
9302}
9303
9304
9305/**
9306 * For handling split cacheline lock operations when the host has split-lock
9307 * detection enabled.
9308 *
9309 * This will cause the interpreter to disregard the lock prefix and implicit
9310 * locking (xchg).
9311 *
9312 * @returns Strict VBox status code.
9313 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9314 */
9315VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9316{
9317 /*
9318 * Do the decoding and emulation.
9319 */
9320 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9321 if (rcStrict == VINF_SUCCESS)
9322 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9323 else if (pVCpu->iem.s.cActiveMappings > 0)
9324 iemMemRollback(pVCpu);
9325
9326 if (rcStrict != VINF_SUCCESS)
9327 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9328 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9329 return rcStrict;
9330}
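
/*
 * Usage sketch (hypothetical caller): when the host flags a split-lock
 * violation for a guest locked access, the instruction can be retried with
 * the lock semantics disregarded as described above.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
 */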
9331
9332
9333/**
9334 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9335 * inject a pending TRPM trap.
9336 */
9337VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9338{
9339 Assert(TRPMHasTrap(pVCpu));
9340
9341 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9342 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9343 {
9344 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9345#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9346 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9347 if (fIntrEnabled)
9348 {
9349 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9350 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9351 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9352 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9353 else
9354 {
9355 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9356 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9357 }
9358 }
9359#else
9360 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9361#endif
9362 if (fIntrEnabled)
9363 {
9364 uint8_t u8TrapNo;
9365 TRPMEVENT enmType;
9366 uint32_t uErrCode;
9367 RTGCPTR uCr2;
9368 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9369 AssertRC(rc2);
9370 Assert(enmType == TRPM_HARDWARE_INT);
9371 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9372
9373 TRPMResetTrap(pVCpu);
9374
9375#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9376 /* Injecting an event may cause a VM-exit. */
9377 if ( rcStrict != VINF_SUCCESS
9378 && rcStrict != VINF_IEM_RAISED_XCPT)
9379 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9380#else
9381 NOREF(rcStrict);
9382#endif
9383 }
9384 }
9385
9386 return VINF_SUCCESS;
9387}
9388
9389
9390VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9391{
9392 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9393 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9394 Assert(cMaxInstructions > 0);
9395
9396 /*
9397 * See if there is an interrupt pending in TRPM, inject it if we can.
9398 */
9399 /** @todo What if we are injecting an exception and not an interrupt? Is that
9400 * possible here? For now we assert it is indeed only an interrupt. */
9401 if (!TRPMHasTrap(pVCpu))
9402 { /* likely */ }
9403 else
9404 {
9405 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9406 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9407 { /*likely */ }
9408 else
9409 return rcStrict;
9410 }
9411
9412 /*
9413 * Initial decoder init w/ prefetch, then setup setjmp.
9414 */
9415 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9416 if (rcStrict == VINF_SUCCESS)
9417 {
9418#ifdef IEM_WITH_SETJMP
9419 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9420 IEM_TRY_SETJMP(pVCpu, rcStrict)
9421#endif
9422 {
9423 /*
9424             * The run loop.  The caller's cMaxInstructions limits how many instructions we execute.
9425 */
9426 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9427 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9428 for (;;)
9429 {
9430 /*
9431 * Log the state.
9432 */
9433#ifdef LOG_ENABLED
9434 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9435#endif
9436
9437 /*
9438 * Do the decoding and emulation.
9439 */
9440 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9441 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9442#ifdef VBOX_STRICT
9443 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9444#endif
9445 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9446 {
9447 Assert(pVCpu->iem.s.cActiveMappings == 0);
9448 pVCpu->iem.s.cInstructions++;
9449
9450#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9451 /* Perform any VMX nested-guest instruction boundary actions. */
9452 uint64_t fCpu = pVCpu->fLocalForcedActions;
9453 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9454 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9455 { /* likely */ }
9456 else
9457 {
9458 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9459 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9460 fCpu = pVCpu->fLocalForcedActions;
9461 else
9462 {
9463 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9464 break;
9465 }
9466 }
9467#endif
9468 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9469 {
9470#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9471 uint64_t fCpu = pVCpu->fLocalForcedActions;
9472#endif
9473 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9474 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9475 | VMCPU_FF_TLB_FLUSH
9476 | VMCPU_FF_UNHALT );
9477
9478 if (RT_LIKELY( ( !fCpu
9479 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9480 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9481 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9482 {
9483 if (--cMaxInstructionsGccStupidity > 0)
9484 {
9485                            /* Poll timers every now and then according to the caller's specs. */
9486 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9487 || !TMTimerPollBool(pVM, pVCpu))
9488 {
9489 Assert(pVCpu->iem.s.cActiveMappings == 0);
9490 iemReInitDecoder(pVCpu);
9491 continue;
9492 }
9493 }
9494 }
9495 }
9496 Assert(pVCpu->iem.s.cActiveMappings == 0);
9497 }
9498 else if (pVCpu->iem.s.cActiveMappings > 0)
9499 iemMemRollback(pVCpu);
9500 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9501 break;
9502 }
9503 }
9504#ifdef IEM_WITH_SETJMP
9505 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9506 {
9507 if (pVCpu->iem.s.cActiveMappings > 0)
9508 iemMemRollback(pVCpu);
9509# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9510 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9511# endif
9512 pVCpu->iem.s.cLongJumps++;
9513 }
9514 IEM_CATCH_LONGJMP_END(pVCpu);
9515#endif
9516
9517 /*
9518 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9519 */
9520 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9521 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9522 }
9523 else
9524 {
9525 if (pVCpu->iem.s.cActiveMappings > 0)
9526 iemMemRollback(pVCpu);
9527
9528#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9529 /*
9530 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9531 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9532 */
9533 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9534#endif
9535 }
9536
9537 /*
9538     * Log the outcome when it isn't VINF_SUCCESS.
9539 */
9540 if (rcStrict != VINF_SUCCESS)
9541 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9542 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9543 if (pcInstructions)
9544 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9545 return rcStrict;
9546}
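
/*
 * Usage sketch (hypothetical caller, illustrative limits): note that the
 * assertion above requires cPollRate + 1 to be a power of two, so the poll
 * rate must be of the form 2^n - 1 (511 below).
 *
 *      uint32_t     cInstructions = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu,
 *                                          4096,              // cMaxInstructions
 *                                          511,               // cPollRate: must be 2^n - 1
 *                                          &cInstructions);   // optional instruction count
 *      LogFlow(("IEMExecLots: %u instructions, %Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
 */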
9547
9548
9549/**
9550 * Interface used by EMExecuteExec, does exit statistics and limits.
9551 *
9552 * @returns Strict VBox status code.
9553 * @param pVCpu The cross context virtual CPU structure.
9554 * @param fWillExit To be defined.
9555 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9556 * @param cMaxInstructions Maximum number of instructions to execute.
9557 * @param cMaxInstructionsWithoutExits
9558 * The max number of instructions without exits.
9559 * @param pStats Where to return statistics.
9560 */
9561VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9562 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9563{
9564 NOREF(fWillExit); /** @todo define flexible exit crits */
9565
9566 /*
9567 * Initialize return stats.
9568 */
9569 pStats->cInstructions = 0;
9570 pStats->cExits = 0;
9571 pStats->cMaxExitDistance = 0;
9572 pStats->cReserved = 0;
9573
9574 /*
9575 * Initial decoder init w/ prefetch, then setup setjmp.
9576 */
9577 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9578 if (rcStrict == VINF_SUCCESS)
9579 {
9580#ifdef IEM_WITH_SETJMP
9581 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9582 IEM_TRY_SETJMP(pVCpu, rcStrict)
9583#endif
9584 {
9585#ifdef IN_RING0
9586 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9587#endif
9588 uint32_t cInstructionSinceLastExit = 0;
9589
9590 /*
9591             * The run loop.  The caller's cMaxInstructions and cMaxInstructionsWithoutExits limit how long we keep going.
9592 */
9593 PVM pVM = pVCpu->CTX_SUFF(pVM);
9594 for (;;)
9595 {
9596 /*
9597 * Log the state.
9598 */
9599#ifdef LOG_ENABLED
9600 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9601#endif
9602
9603 /*
9604 * Do the decoding and emulation.
9605 */
9606 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9607
9608 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9609 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9610
9611 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9612 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9613 {
9614 pStats->cExits += 1;
9615 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9616 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9617 cInstructionSinceLastExit = 0;
9618 }
9619
9620 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9621 {
9622 Assert(pVCpu->iem.s.cActiveMappings == 0);
9623 pVCpu->iem.s.cInstructions++;
9624 pStats->cInstructions++;
9625 cInstructionSinceLastExit++;
9626
9627#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9628 /* Perform any VMX nested-guest instruction boundary actions. */
9629 uint64_t fCpu = pVCpu->fLocalForcedActions;
9630 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9631 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9632 { /* likely */ }
9633 else
9634 {
9635 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9636 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9637 fCpu = pVCpu->fLocalForcedActions;
9638 else
9639 {
9640 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9641 break;
9642 }
9643 }
9644#endif
9645 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9646 {
9647#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9648 uint64_t fCpu = pVCpu->fLocalForcedActions;
9649#endif
9650 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9651 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9652 | VMCPU_FF_TLB_FLUSH
9653 | VMCPU_FF_UNHALT );
9654 if (RT_LIKELY( ( ( !fCpu
9655 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9656 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9657 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
9658 || pStats->cInstructions < cMinInstructions))
9659 {
9660 if (pStats->cInstructions < cMaxInstructions)
9661 {
9662 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
9663 {
9664#ifdef IN_RING0
9665 if ( !fCheckPreemptionPending
9666 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
9667#endif
9668 {
9669 Assert(pVCpu->iem.s.cActiveMappings == 0);
9670 iemReInitDecoder(pVCpu);
9671 continue;
9672 }
9673#ifdef IN_RING0
9674 rcStrict = VINF_EM_RAW_INTERRUPT;
9675 break;
9676#endif
9677 }
9678 }
9679 }
9680 Assert(!(fCpu & VMCPU_FF_IEM));
9681 }
9682 Assert(pVCpu->iem.s.cActiveMappings == 0);
9683 }
9684 else if (pVCpu->iem.s.cActiveMappings > 0)
9685 iemMemRollback(pVCpu);
9686 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9687 break;
9688 }
9689 }
9690#ifdef IEM_WITH_SETJMP
9691 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9692 {
9693 if (pVCpu->iem.s.cActiveMappings > 0)
9694 iemMemRollback(pVCpu);
9695 pVCpu->iem.s.cLongJumps++;
9696 }
9697 IEM_CATCH_LONGJMP_END(pVCpu);
9698#endif
9699
9700 /*
9701 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9702 */
9703 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9704 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9705 }
9706 else
9707 {
9708 if (pVCpu->iem.s.cActiveMappings > 0)
9709 iemMemRollback(pVCpu);
9710
9711#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9712 /*
9713 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9714 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9715 */
9716 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9717#endif
9718 }
9719
9720 /*
9721     * Log the outcome when it isn't VINF_SUCCESS.
9722 */
9723 if (rcStrict != VINF_SUCCESS)
9724 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
9725 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
9726 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
9727 return rcStrict;
9728}
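
/*
 * Usage sketch (hypothetical caller, illustrative limits): the statistics are
 * filled in by the function above, so only the limits need choosing.  The
 * structure type name IEMEXECFOREXITSTATS is assumed to be the one behind the
 * PIEMEXECFOREXITSTATS pointer type, following the usual naming convention.
 *
 *      IEMEXECFOREXITSTATS Stats;
 *      VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu,
 *                                              0,      // fWillExit (currently unused)
 *                                              64,     // cMinInstructions
 *                                              4096,   // cMaxInstructions
 *                                              512,    // cMaxInstructionsWithoutExits
 *                                              &Stats);
 *      LogFlow(("ins=%u exits=%u maxdist=%u, %Rrc\n",
 *               Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
 */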
9729
9730
9731/**
9732 * Injects a trap, fault, abort, software interrupt or external interrupt.
9733 *
9734 * The parameter list matches TRPMQueryTrapAll pretty closely.
9735 *
9736 * @returns Strict VBox status code.
9737 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9738 * @param u8TrapNo The trap number.
9739 * @param enmType What type is it (trap/fault/abort), software
9740 * interrupt or hardware interrupt.
9741 * @param uErrCode The error code if applicable.
9742 * @param uCr2 The CR2 value if applicable.
9743 * @param cbInstr The instruction length (only relevant for
9744 * software interrupts).
9745 */
9746VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
9747 uint8_t cbInstr)
9748{
9749 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
9750#ifdef DBGFTRACE_ENABLED
9751 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
9752 u8TrapNo, enmType, uErrCode, uCr2);
9753#endif
9754
9755 uint32_t fFlags;
9756 switch (enmType)
9757 {
9758 case TRPM_HARDWARE_INT:
9759 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
9760 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
9761 uErrCode = uCr2 = 0;
9762 break;
9763
9764 case TRPM_SOFTWARE_INT:
9765 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
9766 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
9767 uErrCode = uCr2 = 0;
9768 break;
9769
9770 case TRPM_TRAP:
9771 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
9772 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
9773 if (u8TrapNo == X86_XCPT_PF)
9774 fFlags |= IEM_XCPT_FLAGS_CR2;
9775 switch (u8TrapNo)
9776 {
9777 case X86_XCPT_DF:
9778 case X86_XCPT_TS:
9779 case X86_XCPT_NP:
9780 case X86_XCPT_SS:
9781 case X86_XCPT_PF:
9782 case X86_XCPT_AC:
9783 case X86_XCPT_GP:
9784 fFlags |= IEM_XCPT_FLAGS_ERR;
9785 break;
9786 }
9787 break;
9788
9789 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9790 }
9791
9792 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
9793
9794 if (pVCpu->iem.s.cActiveMappings > 0)
9795 iemMemRollback(pVCpu);
9796
9797 return rcStrict;
9798}
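
/*
 * Usage sketch (hypothetical values): injecting an external hardware
 * interrupt, here vector 0x41.  For TRPM_HARDWARE_INT the error code and CR2
 * are ignored, and the instruction length only matters for software
 * interrupts, so zeros are passed for all three.
 *
 *      VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu,
 *                                            0x41,              // u8TrapNo
 *                                            TRPM_HARDWARE_INT, // enmType
 *                                            0,                 // uErrCode (ignored)
 *                                            0,                 // uCr2 (ignored)
 *                                            0);                // cbInstr (not a software int)
 */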
9799
9800
9801/**
9802 * Injects the active TRPM event.
9803 *
9804 * @returns Strict VBox status code.
9805 * @param pVCpu The cross context virtual CPU structure.
9806 */
9807VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
9808{
9809#ifndef IEM_IMPLEMENTS_TASKSWITCH
9810 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
9811#else
9812 uint8_t u8TrapNo;
9813 TRPMEVENT enmType;
9814 uint32_t uErrCode;
9815 RTGCUINTPTR uCr2;
9816 uint8_t cbInstr;
9817 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
9818 if (RT_FAILURE(rc))
9819 return rc;
9820
9821 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
9822 * ICEBP \#DB injection as a special case. */
9823 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
9824#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
9825 if (rcStrict == VINF_SVM_VMEXIT)
9826 rcStrict = VINF_SUCCESS;
9827#endif
9828#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9829 if (rcStrict == VINF_VMX_VMEXIT)
9830 rcStrict = VINF_SUCCESS;
9831#endif
9832 /** @todo Are there any other codes that imply the event was successfully
9833 * delivered to the guest? See @bugref{6607}. */
9834 if ( rcStrict == VINF_SUCCESS
9835 || rcStrict == VINF_IEM_RAISED_XCPT)
9836 TRPMResetTrap(pVCpu);
9837
9838 return rcStrict;
9839#endif
9840}
9841
9842
9843VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
9844{
9845 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9846 return VERR_NOT_IMPLEMENTED;
9847}
9848
9849
9850VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
9851{
9852 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9853 return VERR_NOT_IMPLEMENTED;
9854}
9855
9856
9857/**
9858 * Interface for HM and EM for executing string I/O OUT (write) instructions.
9859 *
9860 * This API ASSUMES that the caller has already verified that the guest code is
9861 * allowed to access the I/O port. (The I/O port is in the DX register in the
9862 * guest state.)
9863 *
9864 * @returns Strict VBox status code.
9865 * @param pVCpu The cross context virtual CPU structure.
9866 * @param cbValue The size of the I/O port access (1, 2, or 4).
9867 * @param enmAddrMode The addressing mode.
9868 * @param fRepPrefix Indicates whether a repeat prefix is used
9869 * (doesn't matter which for this instruction).
9870 * @param cbInstr The instruction length in bytes.
9871 * @param   iEffSeg     The effective segment register.
9872 * @param fIoChecked Whether the access to the I/O port has been
9873 * checked or not. It's typically checked in the
9874 * HM scenario.
9875 */
9876VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
9877 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
9878{
9879 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
9880 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
9881
9882 /*
9883 * State init.
9884 */
9885 iemInitExec(pVCpu, 0 /*fExecOpts*/);
9886
9887 /*
9888 * Switch orgy for getting to the right handler.
9889 */
9890 VBOXSTRICTRC rcStrict;
9891 if (fRepPrefix)
9892 {
9893 switch (enmAddrMode)
9894 {
9895 case IEMMODE_16BIT:
9896 switch (cbValue)
9897 {
9898 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9899 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9900 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9901 default:
9902 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9903 }
9904 break;
9905
9906 case IEMMODE_32BIT:
9907 switch (cbValue)
9908 {
9909 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9910 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9911 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9912 default:
9913 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9914 }
9915 break;
9916
9917 case IEMMODE_64BIT:
9918 switch (cbValue)
9919 {
9920 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9921 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9922 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9923 default:
9924 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9925 }
9926 break;
9927
9928 default:
9929 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9930 }
9931 }
9932 else
9933 {
9934 switch (enmAddrMode)
9935 {
9936 case IEMMODE_16BIT:
9937 switch (cbValue)
9938 {
9939 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9940 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9941 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9942 default:
9943 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9944 }
9945 break;
9946
9947 case IEMMODE_32BIT:
9948 switch (cbValue)
9949 {
9950 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9951 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9952 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9953 default:
9954 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9955 }
9956 break;
9957
9958 case IEMMODE_64BIT:
9959 switch (cbValue)
9960 {
9961 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9962 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9963 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9964 default:
9965 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9966 }
9967 break;
9968
9969 default:
9970 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9971 }
9972 }
9973
9974 if (pVCpu->iem.s.cActiveMappings)
9975 iemMemRollback(pVCpu);
9976
9977 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
9978}
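
/*
 * Usage sketch (hypothetical exit handler, illustrative values): forwarding a
 * "REP OUTSB" with 16-bit addressing (F3 6E, two bytes) that reads from DS and
 * whose I/O port access has already been checked.  X86_SREG_DS is assumed to
 * be the usual segment register index constant.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu,
 *                                                   1,             // cbValue: byte access
 *                                                   IEMMODE_16BIT, // enmAddrMode
 *                                                   true,          // fRepPrefix
 *                                                   2,             // cbInstr: F3 6E
 *                                                   X86_SREG_DS,   // iEffSeg
 *                                                   true);         // fIoChecked
 */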
9979
9980
9981/**
9982 * Interface for HM and EM for executing string I/O IN (read) instructions.
9983 *
9984 * This API ASSUMES that the caller has already verified that the guest code is
9985 * allowed to access the I/O port. (The I/O port is in the DX register in the
9986 * guest state.)
9987 *
9988 * @returns Strict VBox status code.
9989 * @param pVCpu The cross context virtual CPU structure.
9990 * @param cbValue The size of the I/O port access (1, 2, or 4).
9991 * @param enmAddrMode The addressing mode.
9992 * @param fRepPrefix Indicates whether a repeat prefix is used
9993 * (doesn't matter which for this instruction).
9994 * @param cbInstr The instruction length in bytes.
9995 * @param fIoChecked Whether the access to the I/O port has been
9996 * checked or not. It's typically checked in the
9997 * HM scenario.
9998 */
9999VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10000 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10001{
10002 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10003
10004 /*
10005 * State init.
10006 */
10007 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10008
10009 /*
10010 * Switch orgy for getting to the right handler.
10011 */
10012 VBOXSTRICTRC rcStrict;
10013 if (fRepPrefix)
10014 {
10015 switch (enmAddrMode)
10016 {
10017 case IEMMODE_16BIT:
10018 switch (cbValue)
10019 {
10020 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10021 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10022 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10023 default:
10024 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10025 }
10026 break;
10027
10028 case IEMMODE_32BIT:
10029 switch (cbValue)
10030 {
10031 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10032 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10033 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10034 default:
10035 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10036 }
10037 break;
10038
10039 case IEMMODE_64BIT:
10040 switch (cbValue)
10041 {
10042 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10043 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10044 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10045 default:
10046 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10047 }
10048 break;
10049
10050 default:
10051 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10052 }
10053 }
10054 else
10055 {
10056 switch (enmAddrMode)
10057 {
10058 case IEMMODE_16BIT:
10059 switch (cbValue)
10060 {
10061 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10062 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10063 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10064 default:
10065 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10066 }
10067 break;
10068
10069 case IEMMODE_32BIT:
10070 switch (cbValue)
10071 {
10072 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10073 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10074 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10075 default:
10076 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10077 }
10078 break;
10079
10080 case IEMMODE_64BIT:
10081 switch (cbValue)
10082 {
10083 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10084 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10085 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10086 default:
10087 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10088 }
10089 break;
10090
10091 default:
10092 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10093 }
10094 }
10095
10096 if ( pVCpu->iem.s.cActiveMappings == 0
10097 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10098 { /* likely */ }
10099 else
10100 {
10101 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10102 iemMemRollback(pVCpu);
10103 }
10104 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10105}
10106
10107
10108/**
10109 * Interface for rawmode to execute an OUT (write) instruction.
10110 *
10111 * @returns Strict VBox status code.
10112 * @param pVCpu The cross context virtual CPU structure.
10113 * @param cbInstr The instruction length in bytes.
10114 * @param   u16Port     The port to write to.
10115 * @param fImm Whether the port is specified using an immediate operand or
10116 * using the implicit DX register.
10117 * @param cbReg The register size.
10118 *
10119 * @remarks In ring-0 not all of the state needs to be synced in.
10120 */
10121VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10122{
10123 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10124 Assert(cbReg <= 4 && cbReg != 3);
10125
10126 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10127 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10128 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10129 Assert(!pVCpu->iem.s.cActiveMappings);
10130 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10131}
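
/*
 * Usage sketch (hypothetical values): emulating "OUT DX, AL" (opcode EE, one
 * byte) with the port taken from the guest DX register rather than an
 * immediate operand.  The matching IN case uses IEMExecDecodedIn the same way.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu,
 *                                                1,                                 // cbInstr
 *                                                (uint16_t)pVCpu->cpum.GstCtx.edx,  // u16Port (DX)
 *                                                false,                             // fImm
 *                                                1);                                // cbReg
 */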
10132
10133
10134/**
10135 * Interface for rawmode to execute an IN (read) instruction.
10136 *
10137 * @returns Strict VBox status code.
10138 * @param pVCpu The cross context virtual CPU structure.
10139 * @param cbInstr The instruction length in bytes.
10140 * @param u16Port The port to read.
10141 * @param fImm Whether the port is specified using an immediate operand or
10142 * using the implicit DX.
10143 * @param cbReg The register size.
10144 */
10145VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10146{
10147 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10148 Assert(cbReg <= 4 && cbReg != 3);
10149
10150 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10151 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10152 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10153 Assert(!pVCpu->iem.s.cActiveMappings);
10154 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10155}
10156
10157
10158/**
10159 * Interface for HM and EM to write to a CRx register.
10160 *
10161 * @returns Strict VBox status code.
10162 * @param pVCpu The cross context virtual CPU structure.
10163 * @param cbInstr The instruction length in bytes.
10164 * @param iCrReg The control register number (destination).
10165 * @param iGReg The general purpose register number (source).
10166 *
10167 * @remarks In ring-0 not all of the state needs to be synced in.
10168 */
10169VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10170{
10171 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10172 Assert(iCrReg < 16);
10173 Assert(iGReg < 16);
10174
10175 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10176 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10177 Assert(!pVCpu->iem.s.cActiveMappings);
10178 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10179}
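
/*
 * Usage sketch (hypothetical values): forwarding a trapped "MOV CR3, RAX"
 * (0F 22 D8, three bytes).  X86_GREG_xAX is assumed to be the usual general
 * purpose register index constant for RAX.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu,
 *                                                        3,             // cbInstr
 *                                                        3,             // iCrReg: CR3
 *                                                        X86_GREG_xAX); // iGReg: RAX
 */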
10180
10181
10182/**
10183 * Interface for HM and EM to read from a CRx register.
10184 *
10185 * @returns Strict VBox status code.
10186 * @param pVCpu The cross context virtual CPU structure.
10187 * @param cbInstr The instruction length in bytes.
10188 * @param iGReg The general purpose register number (destination).
10189 * @param iCrReg The control register number (source).
10190 *
10191 * @remarks In ring-0 not all of the state needs to be synced in.
10192 */
10193VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10194{
10195 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10196 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10197 | CPUMCTX_EXTRN_APIC_TPR);
10198 Assert(iCrReg < 16);
10199 Assert(iGReg < 16);
10200
10201 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10202 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10203 Assert(!pVCpu->iem.s.cActiveMappings);
10204 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10205}
10206
10207
10208/**
10209 * Interface for HM and EM to write to a DRx register.
10210 *
10211 * @returns Strict VBox status code.
10212 * @param pVCpu The cross context virtual CPU structure.
10213 * @param cbInstr The instruction length in bytes.
10214 * @param iDrReg The debug register number (destination).
10215 * @param iGReg The general purpose register number (source).
10216 *
10217 * @remarks In ring-0 not all of the state needs to be synced in.
10218 */
10219VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10220{
10221 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10222 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10223 Assert(iDrReg < 8);
10224 Assert(iGReg < 16);
10225
10226 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10227 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10228 Assert(!pVCpu->iem.s.cActiveMappings);
10229 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10230}
10231
10232
10233/**
10234 * Interface for HM and EM to read from a DRx register.
10235 *
10236 * @returns Strict VBox status code.
10237 * @param pVCpu The cross context virtual CPU structure.
10238 * @param cbInstr The instruction length in bytes.
10239 * @param iGReg The general purpose register number (destination).
10240 * @param iDrReg The debug register number (source).
10241 *
10242 * @remarks In ring-0 not all of the state needs to be synced in.
10243 */
10244VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10245{
10246 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10247 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10248 Assert(iDrReg < 8);
10249 Assert(iGReg < 16);
10250
10251 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10252 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10253 Assert(!pVCpu->iem.s.cActiveMappings);
10254 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10255}
10256
10257
10258/**
10259 * Interface for HM and EM to clear the CR0[TS] bit.
10260 *
10261 * @returns Strict VBox status code.
10262 * @param pVCpu The cross context virtual CPU structure.
10263 * @param cbInstr The instruction length in bytes.
10264 *
10265 * @remarks In ring-0 not all of the state needs to be synced in.
10266 */
10267VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10268{
10269 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10270
10271 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10272 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10273 Assert(!pVCpu->iem.s.cActiveMappings);
10274 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10275}
10276
10277
10278/**
10279 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10280 *
10281 * @returns Strict VBox status code.
10282 * @param pVCpu The cross context virtual CPU structure.
10283 * @param cbInstr The instruction length in bytes.
10284 * @param uValue The value to load into CR0.
10285 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10286 * memory operand. Otherwise pass NIL_RTGCPTR.
10287 *
10288 * @remarks In ring-0 not all of the state needs to be synced in.
10289 */
10290VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10291{
10292 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10293
10294 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10295 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10296 Assert(!pVCpu->iem.s.cActiveMappings);
10297 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10298}
10299
10300
10301/**
10302 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10303 *
10304 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10305 *
10306 * @returns Strict VBox status code.
10307 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10308 * @param cbInstr The instruction length in bytes.
10309 * @remarks In ring-0 not all of the state needs to be synced in.
10310 * @thread EMT(pVCpu)
10311 */
10312VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10313{
10314 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10315
10316 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10317 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10318 Assert(!pVCpu->iem.s.cActiveMappings);
10319 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10320}
10321
10322
10323/**
10324 * Interface for HM and EM to emulate the WBINVD instruction.
10325 *
10326 * @returns Strict VBox status code.
10327 * @param pVCpu The cross context virtual CPU structure.
10328 * @param cbInstr The instruction length in bytes.
10329 *
10330 * @remarks In ring-0 not all of the state needs to be synced in.
10331 */
10332VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10333{
10334 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10335
10336 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10337 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10338 Assert(!pVCpu->iem.s.cActiveMappings);
10339 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10340}
10341
10342
10343/**
10344 * Interface for HM and EM to emulate the INVD instruction.
10345 *
10346 * @returns Strict VBox status code.
10347 * @param pVCpu The cross context virtual CPU structure.
10348 * @param cbInstr The instruction length in bytes.
10349 *
10350 * @remarks In ring-0 not all of the state needs to be synced in.
10351 */
10352VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10353{
10354 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10355
10356 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10357 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10358 Assert(!pVCpu->iem.s.cActiveMappings);
10359 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10360}
10361
10362
10363/**
10364 * Interface for HM and EM to emulate the INVLPG instruction.
10365 *
10366 * @returns Strict VBox status code.
10367 * @retval VINF_PGM_SYNC_CR3
10368 *
10369 * @param pVCpu The cross context virtual CPU structure.
10370 * @param cbInstr The instruction length in bytes.
10371 * @param GCPtrPage The effective address of the page to invalidate.
10372 *
10373 * @remarks In ring-0 not all of the state needs to be synced in.
10374 */
10375VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10376{
10377 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10378
10379 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10380 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10381 Assert(!pVCpu->iem.s.cActiveMappings);
10382 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10383}
10384
10385
10386/**
10387 * Interface for HM and EM to emulate the INVPCID instruction.
10388 *
10389 * @returns Strict VBox status code.
10390 * @retval VINF_PGM_SYNC_CR3
10391 *
10392 * @param pVCpu The cross context virtual CPU structure.
10393 * @param cbInstr The instruction length in bytes.
10394 * @param iEffSeg The effective segment register.
10395 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10396 * @param uType The invalidation type.
10397 *
10398 * @remarks In ring-0 not all of the state needs to be synced in.
10399 */
10400VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10401 uint64_t uType)
10402{
10403 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10404
10405 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10406 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10407 Assert(!pVCpu->iem.s.cActiveMappings);
10408 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10409}
10410
10411
10412/**
10413 * Interface for HM and EM to emulate the CPUID instruction.
10414 *
10415 * @returns Strict VBox status code.
10416 *
10417 * @param pVCpu The cross context virtual CPU structure.
10418 * @param cbInstr The instruction length in bytes.
10419 *
10420 * @remarks Not all of the state needs to be synced in: the usual set plus RAX and RCX.
10421 */
10422VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10423{
10424 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10425 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10426
10427 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10428 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10429 Assert(!pVCpu->iem.s.cActiveMappings);
10430 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10431}
10432
10433
10434/**
10435 * Interface for HM and EM to emulate the RDPMC instruction.
10436 *
10437 * @returns Strict VBox status code.
10438 *
10439 * @param pVCpu The cross context virtual CPU structure.
10440 * @param cbInstr The instruction length in bytes.
10441 *
10442 * @remarks Not all of the state needs to be synced in.
10443 */
10444VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10445{
10446 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10447 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10448
10449 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10450 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10451 Assert(!pVCpu->iem.s.cActiveMappings);
10452 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10453}
10454
10455
10456/**
10457 * Interface for HM and EM to emulate the RDTSC instruction.
10458 *
10459 * @returns Strict VBox status code.
10460 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10461 *
10462 * @param pVCpu The cross context virtual CPU structure.
10463 * @param cbInstr The instruction length in bytes.
10464 *
10465 * @remarks Not all of the state needs to be synced in.
10466 */
10467VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10468{
10469 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10470 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10471
10472 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10473 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10474 Assert(!pVCpu->iem.s.cActiveMappings);
10475 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10476}
10477
10478
10479/**
10480 * Interface for HM and EM to emulate the RDTSCP instruction.
10481 *
10482 * @returns Strict VBox status code.
10483 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10484 *
10485 * @param pVCpu The cross context virtual CPU structure.
10486 * @param cbInstr The instruction length in bytes.
10487 *
10488 * @remarks Not all of the state needs to be synced in. Recommended
10489 *          to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10490 */
10491VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10492{
10493 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10494 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10495
10496 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10497 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10498 Assert(!pVCpu->iem.s.cActiveMappings);
10499 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10500}
10501
10502
10503/**
10504 * Interface for HM and EM to emulate the RDMSR instruction.
10505 *
10506 * @returns Strict VBox status code.
10507 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10508 *
10509 * @param pVCpu The cross context virtual CPU structure.
10510 * @param cbInstr The instruction length in bytes.
10511 *
10512 * @remarks Not all of the state needs to be synced in. Requires RCX and
10513 * (currently) all MSRs.
10514 */
10515VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10516{
10517 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10518 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10519
10520 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10521 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10522 Assert(!pVCpu->iem.s.cActiveMappings);
10523 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10524}
10525
10526
10527/**
10528 * Interface for HM and EM to emulate the WRMSR instruction.
10529 *
10530 * @returns Strict VBox status code.
10531 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10532 *
10533 * @param pVCpu The cross context virtual CPU structure.
10534 * @param cbInstr The instruction length in bytes.
10535 *
10536 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10537 * and (currently) all MSRs.
10538 */
10539VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10540{
10541 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10542 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10543 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10544
10545 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10546 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10547 Assert(!pVCpu->iem.s.cActiveMappings);
10548 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10549}
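/*
 * Editor's note: a minimal sketch of satisfying the context requirements that
 * the RDMSR/WRMSR wrappers assert before calling them.  The
 * hmImportGuestStateExample() helper is hypothetical and stands in for
 * whatever state-import routine the caller actually uses; cbInstr comes from
 * the caller's own exit/decode information, and the mask simply mirrors the
 * IEM_CTX_ASSERT in IEMExecDecodedWrmsr above.
 *
 * @code
 *  // Make sure RCX, RAX, RDX and the MSRs are available in the guest context
 *  // before handing the instruction to IEM (hypothetical import helper).
 *  int rc = hmImportGuestStateExample(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
 *                                          | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX
 *                                          | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
 *  AssertRCReturn(rc, rc);
 *  VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, cbInstr);
 * @endcode
 */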
10550
10551
10552/**
10553 * Interface for HM and EM to emulate the MONITOR instruction.
10554 *
10555 * @returns Strict VBox status code.
10556 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10557 *
10558 * @param pVCpu The cross context virtual CPU structure.
10559 * @param cbInstr The instruction length in bytes.
10560 *
10561 * @remarks Not all of the state needs to be synced in.
10562 * @remarks ASSUMES the default DS segment and that no segment override
10563 *          prefixes are used.
10564 */
10565VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10566{
10567 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10568 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10569
10570 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10571 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10572 Assert(!pVCpu->iem.s.cActiveMappings);
10573 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10574}
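/*
 * Editor's note: because the wrapper above hard-codes X86_SREG_DS, a caller
 * that cannot rule out a segment override prefix would have to fall back to
 * full emulation instead.  A hedged sketch; fHasSegPrefix is a hypothetical
 * flag the caller would derive from its own decode information:
 *
 * @code
 *  VBOXSTRICTRC rcStrict = !fHasSegPrefix
 *                        ? IEMExecDecodedMonitor(pVCpu, cbInstr)
 *                        : IEMExecOne(pVCpu); // re-decodes and honours any prefixes
 * @endcode
 */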
10575
10576
10577/**
10578 * Interface for HM and EM to emulate the MWAIT instruction.
10579 *
10580 * @returns Strict VBox status code.
10581 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10582 *
10583 * @param pVCpu The cross context virtual CPU structure.
10584 * @param cbInstr The instruction length in bytes.
10585 *
10586 * @remarks Not all of the state needs to be synced in.
10587 */
10588VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10589{
10590 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10591 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10592
10593 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10594 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10595 Assert(!pVCpu->iem.s.cActiveMappings);
10596 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10597}
10598
10599
10600/**
10601 * Interface for HM and EM to emulate the HLT instruction.
10602 *
10603 * @returns Strict VBox status code.
10604 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10605 *
10606 * @param pVCpu The cross context virtual CPU structure.
10607 * @param cbInstr The instruction length in bytes.
10608 *
10609 * @remarks Not all of the state needs to be synced in.
10610 */
10611VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10612{
10613 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10614
10615 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10616 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10617 Assert(!pVCpu->iem.s.cActiveMappings);
10618 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10619}
10620
10621
10622/**
10623 * Checks if IEM is in the process of delivering an event (interrupt or
10624 * exception).
10625 *
10626 * @returns true if we're in the process of raising an interrupt or exception,
10627 * false otherwise.
10628 * @param pVCpu The cross context virtual CPU structure.
10629 * @param puVector Where to store the vector associated with the
10630 * currently delivered event, optional.
10631 * @param pfFlags Where to store the event delivery flags (see
10632 * IEM_XCPT_FLAGS_XXX), optional.
10633 * @param puErr Where to store the error code associated with the
10634 * event, optional.
10635 * @param puCr2 Where to store the CR2 associated with the event,
10636 * optional.
10637 * @remarks The caller should check the flags to determine if the error code and
10638 * CR2 are valid for the event.
10639 */
10640VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10641{
10642 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10643 if (fRaisingXcpt)
10644 {
10645 if (puVector)
10646 *puVector = pVCpu->iem.s.uCurXcpt;
10647 if (pfFlags)
10648 *pfFlags = pVCpu->iem.s.fCurXcpt;
10649 if (puErr)
10650 *puErr = pVCpu->iem.s.uCurXcptErr;
10651 if (puCr2)
10652 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10653 }
10654 return fRaisingXcpt;
10655}
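/*
 * Editor's note: a minimal usage sketch for IEMGetCurrentXcpt().  Per the
 * remark above, the error code and CR2 are only meaningful when the
 * corresponding IEM_XCPT_FLAGS_XXX bits are set; the specific flag names used
 * below (IEM_XCPT_FLAGS_ERR, IEM_XCPT_FLAGS_CR2) are assumed from that family.
 *
 * @code
 *  uint8_t  uVector;
 *  uint32_t fFlags;
 *  uint32_t uErr;
 *  uint64_t uCr2;
 *  if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *  {
 *      Log(("Delivering vector %#x (flags %#x)\n", uVector, fFlags));
 *      if (fFlags & IEM_XCPT_FLAGS_ERR)    // error code valid for this event?
 *          Log(("  error code %#x\n", uErr));
 *      if (fFlags & IEM_XCPT_FLAGS_CR2)    // CR2 valid (i.e. a page fault)?
 *          Log(("  CR2 %RX64\n", uCr2));
 *  }
 * @endcode
 */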
10656
10657#ifdef IN_RING3
10658
10659/**
10660 * Handles the unlikely and probably fatal merge cases.
10661 *
10662 * @returns Merged status code.
10663 * @param rcStrict Current EM status code.
10664 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10665 * with @a rcStrict.
10666 * @param iMemMap The memory mapping index. For error reporting only.
10667 * @param pVCpu The cross context virtual CPU structure of the calling
10668 * thread, for error reporting only.
10669 */
10670DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
10671 unsigned iMemMap, PVMCPUCC pVCpu)
10672{
10673 if (RT_FAILURE_NP(rcStrict))
10674 return rcStrict;
10675
10676 if (RT_FAILURE_NP(rcStrictCommit))
10677 return rcStrictCommit;
10678
10679 if (rcStrict == rcStrictCommit)
10680 return rcStrictCommit;
10681
10682 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
10683 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
10684 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
10685 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
10686 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
10687 return VERR_IOM_FF_STATUS_IPE;
10688}
10689
10690
10691/**
10692 * Helper for IOMR3ProcessForceFlag.
10693 *
10694 * @returns Merged status code.
10695 * @param rcStrict Current EM status code.
10696 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10697 * with @a rcStrict.
10698 * @param iMemMap The memory mapping index. For error reporting only.
10699 * @param pVCpu The cross context virtual CPU structure of the calling
10700 * thread, for error reporting only.
10701 */
10702DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
10703{
10704 /* Simple. */
10705 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
10706 return rcStrictCommit;
10707
10708 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
10709 return rcStrict;
10710
10711 /* EM scheduling status codes. */
10712 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
10713 && rcStrict <= VINF_EM_LAST))
10714 {
10715 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
10716 && rcStrictCommit <= VINF_EM_LAST))
10717 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
10718 }
10719
10720 /* Unlikely */
10721 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
10722}
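/*
 * Editor's note: a few worked examples of the merge rules above, read straight
 * from the code rather than from any additional contract:
 *  - rcStrict is VINF_SUCCESS (or VINF_EM_RAW_TO_R3): the commit status is
 *    returned as-is.
 *  - both codes are EM scheduling statuses (VINF_EM_FIRST..VINF_EM_LAST): the
 *    numerically smaller, i.e. higher priority, of the two is returned.
 *  - a genuine failure (VERR_*) on either side, or any other unhandled
 *    combination, ends up in iemR3MergeStatusSlow above.
 */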
10723
10724
10725/**
10726 * Called by force-flag handling code when VMCPU_FF_IEM is set.
10727 *
10728 * @returns Merge between @a rcStrict and what the commit operation returned.
10729 * @param pVM The cross context VM structure.
10730 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10731 * @param rcStrict The status code returned by ring-0 or raw-mode.
10732 */
10733VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
10734{
10735 /*
10736 * Reset the pending commit.
10737 */
10738 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
10739 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
10740 ("%#x %#x %#x\n",
10741 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10742 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
10743
10744 /*
10745 * Commit the pending bounce buffers (usually just one).
10746 */
10747 unsigned cBufs = 0;
10748 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
10749 while (iMemMap-- > 0)
10750 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
10751 {
10752 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
10753 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
10754 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
10755
10756 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
10757 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
10758 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
10759
10760 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
10761 {
10762 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
10763 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
10764 pbBuf,
10765 cbFirst,
10766 PGMACCESSORIGIN_IEM);
10767 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
10768 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
10769 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
10770 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
10771 }
10772
10773 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
10774 {
10775 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
10776 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
10777 pbBuf + cbFirst,
10778 cbSecond,
10779 PGMACCESSORIGIN_IEM);
10780 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
10781 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
10782 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
10783 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
10784 }
10785 cBufs++;
10786 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
10787 }
10788
10789 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
10790 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
10791 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10792 pVCpu->iem.s.cActiveMappings = 0;
10793 return rcStrict;
10794}
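/*
 * Editor's note: a minimal sketch of the ring-3 caller side of
 * IEMR3ProcessForceFlag().  How rcStrict was produced and where exactly this
 * sits in the EM loop are simplified assumptions; the force-flag check and the
 * call itself follow the function contract above.
 *
 * @code
 *  // In the EMT force-flag processing, after returning to ring-3 with
 *  // rcStrict from the inner execution loop:
 *  if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *      rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 * @endcode
 */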
10795
10796#endif /* IN_RING3 */
10797