VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 80317

Last change on this file since 80317 was 80281, checked in by vboxsync, 6 years ago

VMM,++: Refactoring code to use VMMC & VMMCPUCC. bugref:9217

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 647.4 KB
 
1/* $Id: IEMAll.cpp 80281 2019-08-15 07:29:37Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed, as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
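 *
 * For example (an illustrative sketch only; the variable names are hypothetical,
 * not taken from this file), a memory-write trace at level 8 would look along
 * these lines:
 * @code
 *      Log8(("IEM WR %RGv LB %#zx\n", GCPtrMem, cbMem));
 * @endcode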
73 *
74 */
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define VBOX_BUGREF_9217_PART_I
89#define LOG_GROUP LOG_GROUP_IEM
90#define VMCPU_INCL_CPUM_GST_CTX
91#include <VBox/vmm/iem.h>
92#include <VBox/vmm/cpum.h>
93#include <VBox/vmm/apic.h>
94#include <VBox/vmm/pdm.h>
95#include <VBox/vmm/pgm.h>
96#include <VBox/vmm/iom.h>
97#include <VBox/vmm/em.h>
98#include <VBox/vmm/hm.h>
99#include <VBox/vmm/nem.h>
100#include <VBox/vmm/gim.h>
101#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
102# include <VBox/vmm/em.h>
103# include <VBox/vmm/hm_svm.h>
104#endif
105#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
106# include <VBox/vmm/hmvmxinline.h>
107#endif
108#include <VBox/vmm/tm.h>
109#include <VBox/vmm/dbgf.h>
110#include <VBox/vmm/dbgftrace.h>
111#include "IEMInternal.h"
112#include <VBox/vmm/vmcc.h>
113#include <VBox/log.h>
114#include <VBox/err.h>
115#include <VBox/param.h>
116#include <VBox/dis.h>
117#include <VBox/disopcode.h>
118#include <iprt/asm-math.h>
119#include <iprt/assert.h>
120#include <iprt/string.h>
121#include <iprt/x86.h>
122
123
124/*********************************************************************************************************************************
125* Structures and Typedefs *
126*********************************************************************************************************************************/
127/** @typedef PFNIEMOP
128 * Pointer to an opcode decoder function.
129 */
130
131/** @def FNIEMOP_DEF
132 * Define an opcode decoder function.
133 *
134 * We're using macros for this so that adding and removing parameters as well as
135 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL
136 *
137 * @param a_Name The function name.
138 */
139
140/** @typedef PFNIEMOPRM
141 * Pointer to an opcode decoder function with RM byte.
142 */
143
144/** @def FNIEMOPRM_DEF
145 * Define an opcode decoder function with RM byte.
146 *
147 * We're using macros for this so that adding and removing parameters as well as
148 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1
149 *
150 * @param a_Name The function name.
151 */
152
153#if defined(__GNUC__) && defined(RT_ARCH_X86)
154typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPUCC pVCpu);
155typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
156# define FNIEMOP_DEF(a_Name) \
157 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu)
158# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
159 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
160# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
161 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
162
163#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
164typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPUCC pVCpu);
165typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
166# define FNIEMOP_DEF(a_Name) \
167 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
168# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
169 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
170# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
171 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
172
173#elif defined(__GNUC__)
174typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
175typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
176# define FNIEMOP_DEF(a_Name) \
177 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu)
178# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
179 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
180# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
181 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
182
183#else
184typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
185typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
186# define FNIEMOP_DEF(a_Name) \
187 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
188# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
189 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
190# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
191 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
192
193#endif
194#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
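
/* A hedged usage sketch (illustrative only, never compiled): FNIEMOP_DEF defines
   an opcode decoder function and FNIEMOP_CALL invokes it with the implicit pVCpu
   argument; the function name and body below are hypothetical. */
#if 0
FNIEMOP_DEF(iemOp_ExampleOnly)
{
    /* A real decoder body would fetch further opcode / ModR/M bytes here and
       dispatch to an IEM_MC block or a C implementation. */
    RT_NOREF(pVCpu);
    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
}
/* A dispatch site would then do something like: return FNIEMOP_CALL(iemOp_ExampleOnly); */
#endif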
195
196
197/**
198 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
199 */
200typedef union IEMSELDESC
201{
202 /** The legacy view. */
203 X86DESC Legacy;
204 /** The long mode view. */
205 X86DESC64 Long;
206} IEMSELDESC;
207/** Pointer to a selector descriptor table entry. */
208typedef IEMSELDESC *PIEMSELDESC;
209
210/**
211 * CPU exception classes.
212 */
213typedef enum IEMXCPTCLASS
214{
215 IEMXCPTCLASS_BENIGN,
216 IEMXCPTCLASS_CONTRIBUTORY,
217 IEMXCPTCLASS_PAGE_FAULT,
218 IEMXCPTCLASS_DOUBLE_FAULT
219} IEMXCPTCLASS;
220
221
222/*********************************************************************************************************************************
223* Defined Constants And Macros *
224*********************************************************************************************************************************/
225/** @def IEM_WITH_SETJMP
226 * Enables alternative status code handling using setjmps.
227 *
228 * This adds a bit of expense via the setjmp() call since it saves all the
229 * non-volatile registers. However, it eliminates return code checks and allows
230 * for more optimal return value passing (return regs instead of stack buffer).
231 */
232#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
233# define IEM_WITH_SETJMP
234#endif
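
/* A hedged sketch (illustrative only, never compiled) of what the two status
   handling styles look like to a caller. iemMemFetchDataU32 is declared further
   down; the ...Jmp variant and the GCPtrMem variable are hypothetical shorthand
   for the longjmp-based helpers used when IEM_WITH_SETJMP is defined. */
#if 0
# ifndef IEM_WITH_SETJMP
    uint32_t     u32Value;
    VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32Value, X86_SREG_DS, GCPtrMem);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;                    /* every caller must propagate the status */
# else
    uint32_t const u32Value = iemMemFetchDataU32Jmp(pVCpu, X86_SREG_DS, GCPtrMem); /* failure longjmps out */
# endif
#endif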
235
236/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
237 * due to GCC lacking knowledge about the value range of a switch. */
238#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
239
240/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
241#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
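
/* Hedged usage sketch (illustrative only, never compiled; the switch and its
   case values are hypothetical): the macro supplies the 'default:' label of a
   switch that is expected to cover every value. */
#if 0
    switch (enmExample)
    {
        case kExampleOne: return VINF_SUCCESS;
        case kExampleTwo: return VINF_SUCCESS;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
#endif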
242
243/**
244 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
245 * occasion.
246 */
247#ifdef LOG_ENABLED
248# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
249 do { \
250 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
251 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
252 } while (0)
253#else
254# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
255 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
256#endif
257
258/**
259 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
260 * occasion using the supplied logger statement.
261 *
262 * @param a_LoggerArgs What to log on failure.
263 */
264#ifdef LOG_ENABLED
265# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
266 do { \
267 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
268 /*LogFunc(a_LoggerArgs);*/ \
269 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
270 } while (0)
271#else
272# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
273 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
274#endif
275
276/**
277 * Call an opcode decoder function.
278 *
279 * We're using macros for this so that adding and removing parameters can be
280 * done as we please. See FNIEMOP_DEF.
281 */
282#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
283
284/**
285 * Call a common opcode decoder function taking one extra argument.
286 *
287 * We're using macros for this so that adding and removing parameters can be
288 * done as we please. See FNIEMOP_DEF_1.
289 */
290#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
291
292/**
293 * Call a common opcode decoder function taking two extra arguments.
294 *
295 * We're using macros for this so that adding and removing parameters can be
296 * done as we please. See FNIEMOP_DEF_2.
297 */
298#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
299
300/**
301 * Check if we're currently executing in real or virtual 8086 mode.
302 *
303 * @returns @c true if it is, @c false if not.
304 * @param a_pVCpu The IEM state of the current CPU.
305 */
306#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
307
308/**
309 * Check if we're currently executing in virtual 8086 mode.
310 *
311 * @returns @c true if it is, @c false if not.
312 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
313 */
314#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
315
316/**
317 * Check if we're currently executing in long mode.
318 *
319 * @returns @c true if it is, @c false if not.
320 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
321 */
322#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
323
324/**
325 * Check if we're currently executing in a 64-bit code segment.
326 *
327 * @returns @c true if it is, @c false if not.
328 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
329 */
330#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
331
332/**
333 * Check if we're currently executing in real mode.
334 *
335 * @returns @c true if it is, @c false if not.
336 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
337 */
338#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
339
340/**
341 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
342 * @returns PCCPUMFEATURES
343 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
344 */
345#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
346
347/**
348 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
349 * @returns PCCPUMFEATURES
350 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
351 */
352#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
353
354/**
355 * Evaluates to true if we're presenting an Intel CPU to the guest.
356 */
357#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
358
359/**
360 * Evaluates to true if we're presenting an AMD CPU to the guest.
361 */
362#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
363
364/**
365 * Check if the address is canonical.
366 */
367#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
368
369/**
370 * Gets the effective VEX.VVVV value.
371 *
372 * The 4th bit is ignored if not 64-bit code.
373 * @returns effective V-register value.
374 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
375 */
376#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
377 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
378
379/** @def IEM_USE_UNALIGNED_DATA_ACCESS
380 * Use unaligned accesses instead of elaborate byte assembly. */
381#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
382# define IEM_USE_UNALIGNED_DATA_ACCESS
383#endif
384
385#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
386
387/**
388 * Check if the guest has entered VMX root operation.
389 */
390# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
391
392/**
393 * Check if the guest has entered VMX non-root operation.
394 */
395# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
396
397/**
398 * Check if the nested-guest has the given Pin-based VM-execution control set.
399 */
400# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
401 (CPUMIsGuestVmxPinCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
402
403/**
404 * Check if the nested-guest has the given Processor-based VM-execution control set.
405 */
406#define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
407 (CPUMIsGuestVmxProcCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
408
409/**
410 * Check if the nested-guest has the given Secondary Processor-based VM-execution
411 * control set.
412 */
413#define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
414 (CPUMIsGuestVmxProcCtls2Set((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
415
416/**
417 * Invokes the VMX VM-exit handler for an instruction intercept.
418 */
419# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
420 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
421
422/**
423 * Invokes the VMX VM-exit handler for an instruction intercept where the
424 * instruction provides additional VM-exit information.
425 */
426# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
427 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
428
429/**
430 * Invokes the VMX VM-exit handler for a task switch.
431 */
432# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
433 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
434
435/**
436 * Invokes the VMX VM-exit handler for MWAIT.
437 */
438# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
439 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
440
441/**
442 * Invokes the VMX VM-exit handler for a triple fault.
443 */
444# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) \
445 do { return iemVmxVmexit((a_pVCpu), (a_uExitReason), (a_uExitQual)); } while (0)
446
447#else
448# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
449# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
450# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr) (false)
451# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr) (false)
452# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr) (false)
453# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
454# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
455# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
456# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
457# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) do { return VERR_VMX_IPE_1; } while (0)
458
459#endif
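
/* Hedged sketch (illustrative only, never compiled): how an instruction handler
   typically combines these checks. RDTSC is merely an example control/exit-reason
   pair and the surrounding handler (including cbInstr) is hypothetical. */
#if 0
    if (   IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
        && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDTSC_EXIT))
        IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDTSC, cbInstr);
#endif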
460
461#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
462/**
463 * Check if an SVM control/instruction intercept is set.
464 */
465# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
466 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
467
468/**
469 * Check if an SVM read CRx intercept is set.
470 */
471# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
472 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
473
474/**
475 * Check if an SVM write CRx intercept is set.
476 */
477# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
478 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
479
480/**
481 * Check if an SVM read DRx intercept is set.
482 */
483# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
484 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
485
486/**
487 * Check if an SVM write DRx intercept is set.
488 */
489# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
490 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
491
492/**
493 * Check if an SVM exception intercept is set.
494 */
495# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
496 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
497
498/**
499 * Invokes the SVM \#VMEXIT handler for the nested-guest.
500 */
501# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
502 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
503
504/**
505 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
506 * corresponding decode assist information.
507 */
508# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
509 do \
510 { \
511 uint64_t uExitInfo1; \
512 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
513 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
514 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
515 else \
516 uExitInfo1 = 0; \
517 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
518 } while (0)
519
520/** Checks and handles an SVM nested-guest instruction intercept, updating the
521 * NRIP if needed.
522 */
523# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
524 do \
525 { \
526 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
527 { \
528 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
529 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
530 } \
531 } while (0)
532
533/** Checks and handles SVM nested-guest CR0 read intercept. */
534# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
535 do \
536 { \
537 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
538 { /* probably likely */ } \
539 else \
540 { \
541 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
542 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
543 } \
544 } while (0)
545
546/**
547 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
548 */
549# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
550 do { \
551 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
552 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
553 } while (0)
554
555#else
556# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
557# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
558# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
559# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
560# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
561# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
562# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
563# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
564# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
565# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
566# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
567
568#endif
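
/* Hedged sketch (illustrative only, never compiled): a C-implementation handler
   would typically guard an intercepted instruction like this. CPUID is only an
   example intercept/exit-code pair and the surrounding handler is hypothetical. */
#if 0
    IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_CPUID, SVM_EXIT_CPUID,
                                  0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
#endif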
569
570
571/*********************************************************************************************************************************
572* Global Variables *
573*********************************************************************************************************************************/
574extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
575
576
577/** Function table for the ADD instruction. */
578IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
579{
580 iemAImpl_add_u8, iemAImpl_add_u8_locked,
581 iemAImpl_add_u16, iemAImpl_add_u16_locked,
582 iemAImpl_add_u32, iemAImpl_add_u32_locked,
583 iemAImpl_add_u64, iemAImpl_add_u64_locked
584};
585
586/** Function table for the ADC instruction. */
587IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
588{
589 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
590 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
591 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
592 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
593};
594
595/** Function table for the SUB instruction. */
596IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
597{
598 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
599 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
600 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
601 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
602};
603
604/** Function table for the SBB instruction. */
605IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
606{
607 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
608 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
609 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
610 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
611};
612
613/** Function table for the OR instruction. */
614IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
615{
616 iemAImpl_or_u8, iemAImpl_or_u8_locked,
617 iemAImpl_or_u16, iemAImpl_or_u16_locked,
618 iemAImpl_or_u32, iemAImpl_or_u32_locked,
619 iemAImpl_or_u64, iemAImpl_or_u64_locked
620};
621
622/** Function table for the XOR instruction. */
623IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
624{
625 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
626 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
627 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
628 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
629};
630
631/** Function table for the AND instruction. */
632IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
633{
634 iemAImpl_and_u8, iemAImpl_and_u8_locked,
635 iemAImpl_and_u16, iemAImpl_and_u16_locked,
636 iemAImpl_and_u32, iemAImpl_and_u32_locked,
637 iemAImpl_and_u64, iemAImpl_and_u64_locked
638};
639
640/** Function table for the CMP instruction.
641 * @remarks Making operand order ASSUMPTIONS.
642 */
643IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
644{
645 iemAImpl_cmp_u8, NULL,
646 iemAImpl_cmp_u16, NULL,
647 iemAImpl_cmp_u32, NULL,
648 iemAImpl_cmp_u64, NULL
649};
650
651/** Function table for the TEST instruction.
652 * @remarks Making operand order ASSUMPTIONS.
653 */
654IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
655{
656 iemAImpl_test_u8, NULL,
657 iemAImpl_test_u16, NULL,
658 iemAImpl_test_u32, NULL,
659 iemAImpl_test_u64, NULL
660};
661
662/** Function table for the BT instruction. */
663IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
664{
665 NULL, NULL,
666 iemAImpl_bt_u16, NULL,
667 iemAImpl_bt_u32, NULL,
668 iemAImpl_bt_u64, NULL
669};
670
671/** Function table for the BTC instruction. */
672IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
673{
674 NULL, NULL,
675 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
676 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
677 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
678};
679
680/** Function table for the BTR instruction. */
681IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
682{
683 NULL, NULL,
684 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
685 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
686 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
687};
688
689/** Function table for the BTS instruction. */
690IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
691{
692 NULL, NULL,
693 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
694 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
695 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
696};
697
698/** Function table for the BSF instruction. */
699IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
700{
701 NULL, NULL,
702 iemAImpl_bsf_u16, NULL,
703 iemAImpl_bsf_u32, NULL,
704 iemAImpl_bsf_u64, NULL
705};
706
707/** Function table for the BSR instruction. */
708IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
709{
710 NULL, NULL,
711 iemAImpl_bsr_u16, NULL,
712 iemAImpl_bsr_u32, NULL,
713 iemAImpl_bsr_u64, NULL
714};
715
716/** Function table for the IMUL instruction. */
717IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
718{
719 NULL, NULL,
720 iemAImpl_imul_two_u16, NULL,
721 iemAImpl_imul_two_u32, NULL,
722 iemAImpl_imul_two_u64, NULL
723};
724
725/** Group 1 /r lookup table. */
726IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
727{
728 &g_iemAImpl_add,
729 &g_iemAImpl_or,
730 &g_iemAImpl_adc,
731 &g_iemAImpl_sbb,
732 &g_iemAImpl_and,
733 &g_iemAImpl_sub,
734 &g_iemAImpl_xor,
735 &g_iemAImpl_cmp
736};
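
/* Hedged sketch (illustrative only, never compiled): a group-1 decoder would pick
   the implementation from the table above via the ModR/M reg field, roughly: */
#if 0
    PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
#endif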
737
738/** Function table for the INC instruction. */
739IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
740{
741 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
742 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
743 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
744 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
745};
746
747/** Function table for the DEC instruction. */
748IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
749{
750 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
751 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
752 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
753 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
754};
755
756/** Function table for the NEG instruction. */
757IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
758{
759 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
760 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
761 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
762 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
763};
764
765/** Function table for the NOT instruction. */
766IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
767{
768 iemAImpl_not_u8, iemAImpl_not_u8_locked,
769 iemAImpl_not_u16, iemAImpl_not_u16_locked,
770 iemAImpl_not_u32, iemAImpl_not_u32_locked,
771 iemAImpl_not_u64, iemAImpl_not_u64_locked
772};
773
774
775/** Function table for the ROL instruction. */
776IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
777{
778 iemAImpl_rol_u8,
779 iemAImpl_rol_u16,
780 iemAImpl_rol_u32,
781 iemAImpl_rol_u64
782};
783
784/** Function table for the ROR instruction. */
785IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
786{
787 iemAImpl_ror_u8,
788 iemAImpl_ror_u16,
789 iemAImpl_ror_u32,
790 iemAImpl_ror_u64
791};
792
793/** Function table for the RCL instruction. */
794IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
795{
796 iemAImpl_rcl_u8,
797 iemAImpl_rcl_u16,
798 iemAImpl_rcl_u32,
799 iemAImpl_rcl_u64
800};
801
802/** Function table for the RCR instruction. */
803IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
804{
805 iemAImpl_rcr_u8,
806 iemAImpl_rcr_u16,
807 iemAImpl_rcr_u32,
808 iemAImpl_rcr_u64
809};
810
811/** Function table for the SHL instruction. */
812IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
813{
814 iemAImpl_shl_u8,
815 iemAImpl_shl_u16,
816 iemAImpl_shl_u32,
817 iemAImpl_shl_u64
818};
819
820/** Function table for the SHR instruction. */
821IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
822{
823 iemAImpl_shr_u8,
824 iemAImpl_shr_u16,
825 iemAImpl_shr_u32,
826 iemAImpl_shr_u64
827};
828
829/** Function table for the SAR instruction. */
830IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
831{
832 iemAImpl_sar_u8,
833 iemAImpl_sar_u16,
834 iemAImpl_sar_u32,
835 iemAImpl_sar_u64
836};
837
838
839/** Function table for the MUL instruction. */
840IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
841{
842 iemAImpl_mul_u8,
843 iemAImpl_mul_u16,
844 iemAImpl_mul_u32,
845 iemAImpl_mul_u64
846};
847
848/** Function table for the IMUL instruction working implicitly on rAX. */
849IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
850{
851 iemAImpl_imul_u8,
852 iemAImpl_imul_u16,
853 iemAImpl_imul_u32,
854 iemAImpl_imul_u64
855};
856
857/** Function table for the DIV instruction. */
858IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
859{
860 iemAImpl_div_u8,
861 iemAImpl_div_u16,
862 iemAImpl_div_u32,
863 iemAImpl_div_u64
864};
865
866/** Function table for the IDIV instruction. */
867IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
868{
869 iemAImpl_idiv_u8,
870 iemAImpl_idiv_u16,
871 iemAImpl_idiv_u32,
872 iemAImpl_idiv_u64
873};
874
875/** Function table for the SHLD instruction */
876IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
877{
878 iemAImpl_shld_u16,
879 iemAImpl_shld_u32,
880 iemAImpl_shld_u64,
881};
882
883/** Function table for the SHRD instruction */
884IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
885{
886 iemAImpl_shrd_u16,
887 iemAImpl_shrd_u32,
888 iemAImpl_shrd_u64,
889};
890
891
892/** Function table for the PUNPCKLBW instruction */
893IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
894/** Function table for the PUNPCKLWD instruction */
895IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
896/** Function table for the PUNPCKLDQ instruction */
897IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
898/** Function table for the PUNPCKLQDQ instruction */
899IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
900
901/** Function table for the PUNPCKHBW instruction */
902IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
903/** Function table for the PUNPCKHWD instruction */
904IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
905/** Function table for the PUNPCKHDQ instruction */
906IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
907/** Function table for the PUNPCKHQDQ instruction */
908IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
909
910/** Function table for the PXOR instruction */
911IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
912/** Function table for the PCMPEQB instruction */
913IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
914/** Function table for the PCMPEQW instruction */
915IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
916/** Function table for the PCMPEQD instruction */
917IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
918
919
920#if defined(IEM_LOG_MEMORY_WRITES)
921/** What IEM just wrote. */
922uint8_t g_abIemWrote[256];
923/** How much IEM just wrote. */
924size_t g_cbIemWrote;
925#endif
926
927
928/*********************************************************************************************************************************
929* Internal Functions *
930*********************************************************************************************************************************/
931IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr);
932IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu);
933IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu);
934IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel);
935/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
936IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
937IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
938IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
939IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
940IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr);
941IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu);
942IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL uSel);
943IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
944IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel);
945IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
946IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
947IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu);
948#ifdef IEM_WITH_SETJMP
949DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
950DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu);
951DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
952DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel);
953DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
954#endif
955
956IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
957IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess);
958IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
959IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
960IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
961IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
962IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
963IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
964IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
965IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
966IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp);
967IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
968IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value);
969IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value);
970IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel);
971IEM_STATIC uint16_t iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg);
972IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg);
973
974#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
975IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual);
976IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
977IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPUCC pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
978IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu);
979IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPUCC pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
980IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value);
981IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t u64Value);
982#endif
983
984#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
985IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
986IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPUCC pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
987#endif
988
989
990/**
991 * Sets the pass up status.
992 *
993 * @returns VINF_SUCCESS.
994 * @param pVCpu The cross context virtual CPU structure of the
995 * calling thread.
996 * @param rcPassUp The pass up status. Must be informational.
997 * VINF_SUCCESS is not allowed.
998 */
999IEM_STATIC int iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp)
1000{
1001 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1002
1003 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1004 if (rcOldPassUp == VINF_SUCCESS)
1005 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1006 /* If both are EM scheduling codes, use EM priority rules. */
1007 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1008 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1009 {
1010 if (rcPassUp < rcOldPassUp)
1011 {
1012 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1013 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1014 }
1015 else
1016 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1017 }
1018 /* Override EM scheduling with specific status code. */
1019 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1020 {
1021 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1022 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1023 }
1024 /* Don't override specific status code, first come first served. */
1025 else
1026 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1027 return VINF_SUCCESS;
1028}
1029
1030
1031/**
1032 * Calculates the CPU mode.
1033 *
1034 * This is mainly for updating IEMCPU::enmCpuMode.
1035 *
1036 * @returns CPU mode.
1037 * @param pVCpu The cross context virtual CPU structure of the
1038 * calling thread.
1039 */
1040DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPUCC pVCpu)
1041{
1042 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1043 return IEMMODE_64BIT;
1044 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1045 return IEMMODE_32BIT;
1046 return IEMMODE_16BIT;
1047}
1048
1049
1050/**
1051 * Initializes the execution state.
1052 *
1053 * @param pVCpu The cross context virtual CPU structure of the
1054 * calling thread.
1055 * @param fBypassHandlers Whether to bypass access handlers.
1056 *
1057 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1058 * side-effects in strict builds.
1059 */
1060DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, bool fBypassHandlers)
1061{
1062 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1063 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1064 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1065 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1066 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1067 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1068 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1069 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1070 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1071 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1072
1073 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1074 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1075#ifdef VBOX_STRICT
1076 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1077 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1078 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1079 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1080 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1081 pVCpu->iem.s.uRexReg = 127;
1082 pVCpu->iem.s.uRexB = 127;
1083 pVCpu->iem.s.offModRm = 127;
1084 pVCpu->iem.s.uRexIndex = 127;
1085 pVCpu->iem.s.iEffSeg = 127;
1086 pVCpu->iem.s.idxPrefix = 127;
1087 pVCpu->iem.s.uVex3rdReg = 127;
1088 pVCpu->iem.s.uVexLength = 127;
1089 pVCpu->iem.s.fEvexStuff = 127;
1090 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1091# ifdef IEM_WITH_CODE_TLB
1092 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1093 pVCpu->iem.s.pbInstrBuf = NULL;
1094 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1095 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1096 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1097 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1098# else
1099 pVCpu->iem.s.offOpcode = 127;
1100 pVCpu->iem.s.cbOpcode = 127;
1101# endif
1102#endif
1103
1104 pVCpu->iem.s.cActiveMappings = 0;
1105 pVCpu->iem.s.iNextMapping = 0;
1106 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1107 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1108#if 0
1109#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1110 if ( CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)
1111 && CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
1112 {
1113 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1114 Assert(pVmcs);
1115 RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
1116 if (!PGMHandlerPhysicalIsRegistered(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
1117 {
1118 int rc = PGMHandlerPhysicalRegister(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess, GCPhysApicAccess + X86_PAGE_4K_SIZE - 1,
1119 pVCpu->iem.s.hVmxApicAccessPage, NIL_RTR3PTR /* pvUserR3 */,
1120 NIL_RTR0PTR /* pvUserR0 */, NIL_RTRCPTR /* pvUserRC */, NULL /* pszDesc */);
1121 AssertRC(rc);
1122 }
1123 }
1124#endif
1125#endif
1126}
1127
1128#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1129/**
1130 * Performs a minimal reinitialization of the execution state.
1131 *
1132 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1133 * 'world-switch' type operations on the CPU. Currently only nested
1134 * hardware-virtualization uses it.
1135 *
1136 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1137 */
1138IEM_STATIC void iemReInitExec(PVMCPUCC pVCpu)
1139{
1140 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1141 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1142
1143 pVCpu->iem.s.uCpl = uCpl;
1144 pVCpu->iem.s.enmCpuMode = enmMode;
1145 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1146 pVCpu->iem.s.enmEffAddrMode = enmMode;
1147 if (enmMode != IEMMODE_64BIT)
1148 {
1149 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1150 pVCpu->iem.s.enmEffOpSize = enmMode;
1151 }
1152 else
1153 {
1154 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1155 pVCpu->iem.s.enmEffOpSize = enmMode;
1156 }
1157 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1158#ifndef IEM_WITH_CODE_TLB
1159 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1160 pVCpu->iem.s.offOpcode = 0;
1161 pVCpu->iem.s.cbOpcode = 0;
1162#endif
1163 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1164}
1165#endif
1166
1167/**
1168 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1169 *
1170 * @param pVCpu The cross context virtual CPU structure of the
1171 * calling thread.
1172 */
1173DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu)
1174{
1175 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1176#ifdef VBOX_STRICT
1177# ifdef IEM_WITH_CODE_TLB
1178 NOREF(pVCpu);
1179# else
1180 pVCpu->iem.s.cbOpcode = 0;
1181# endif
1182#else
1183 NOREF(pVCpu);
1184#endif
1185}
1186
1187
1188/**
1189 * Initializes the decoder state.
1190 *
1191 * iemReInitDecoder is mostly a copy of this function.
1192 *
1193 * @param pVCpu The cross context virtual CPU structure of the
1194 * calling thread.
1195 * @param fBypassHandlers Whether to bypass access handlers.
1196 */
1197DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers)
1198{
1199 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1200 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1201 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1202 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1203 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1204 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1205 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1206 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1207 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1208 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1209
1210 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1211 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1212 pVCpu->iem.s.enmCpuMode = enmMode;
1213 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1214 pVCpu->iem.s.enmEffAddrMode = enmMode;
1215 if (enmMode != IEMMODE_64BIT)
1216 {
1217 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1218 pVCpu->iem.s.enmEffOpSize = enmMode;
1219 }
1220 else
1221 {
1222 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1223 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1224 }
1225 pVCpu->iem.s.fPrefixes = 0;
1226 pVCpu->iem.s.uRexReg = 0;
1227 pVCpu->iem.s.uRexB = 0;
1228 pVCpu->iem.s.uRexIndex = 0;
1229 pVCpu->iem.s.idxPrefix = 0;
1230 pVCpu->iem.s.uVex3rdReg = 0;
1231 pVCpu->iem.s.uVexLength = 0;
1232 pVCpu->iem.s.fEvexStuff = 0;
1233 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1234#ifdef IEM_WITH_CODE_TLB
1235 pVCpu->iem.s.pbInstrBuf = NULL;
1236 pVCpu->iem.s.offInstrNextByte = 0;
1237 pVCpu->iem.s.offCurInstrStart = 0;
1238# ifdef VBOX_STRICT
1239 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1240 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1241 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1242# endif
1243#else
1244 pVCpu->iem.s.offOpcode = 0;
1245 pVCpu->iem.s.cbOpcode = 0;
1246#endif
1247 pVCpu->iem.s.offModRm = 0;
1248 pVCpu->iem.s.cActiveMappings = 0;
1249 pVCpu->iem.s.iNextMapping = 0;
1250 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1251 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1252
1253#ifdef DBGFTRACE_ENABLED
1254 switch (enmMode)
1255 {
1256 case IEMMODE_64BIT:
1257 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1258 break;
1259 case IEMMODE_32BIT:
1260 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1261 break;
1262 case IEMMODE_16BIT:
1263 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1264 break;
1265 }
1266#endif
1267}
1268
1269
1270/**
1271 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1272 *
1273 * This is mostly a copy of iemInitDecoder.
1274 *
1275 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1276 */
1277DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
1278{
1279 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1280 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1281 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1282 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1283 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1284 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1285 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1286 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1287 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1288
1289 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1290 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1291 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1292 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1293 pVCpu->iem.s.enmEffAddrMode = enmMode;
1294 if (enmMode != IEMMODE_64BIT)
1295 {
1296 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1297 pVCpu->iem.s.enmEffOpSize = enmMode;
1298 }
1299 else
1300 {
1301 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1302 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1303 }
1304 pVCpu->iem.s.fPrefixes = 0;
1305 pVCpu->iem.s.uRexReg = 0;
1306 pVCpu->iem.s.uRexB = 0;
1307 pVCpu->iem.s.uRexIndex = 0;
1308 pVCpu->iem.s.idxPrefix = 0;
1309 pVCpu->iem.s.uVex3rdReg = 0;
1310 pVCpu->iem.s.uVexLength = 0;
1311 pVCpu->iem.s.fEvexStuff = 0;
1312 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1313#ifdef IEM_WITH_CODE_TLB
1314 if (pVCpu->iem.s.pbInstrBuf)
1315 {
1316 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1317 - pVCpu->iem.s.uInstrBufPc;
1318 if (off < pVCpu->iem.s.cbInstrBufTotal)
1319 {
1320 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1321 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1322 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1323 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1324 else
1325 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1326 }
1327 else
1328 {
1329 pVCpu->iem.s.pbInstrBuf = NULL;
1330 pVCpu->iem.s.offInstrNextByte = 0;
1331 pVCpu->iem.s.offCurInstrStart = 0;
1332 pVCpu->iem.s.cbInstrBuf = 0;
1333 pVCpu->iem.s.cbInstrBufTotal = 0;
1334 }
1335 }
1336 else
1337 {
1338 pVCpu->iem.s.offInstrNextByte = 0;
1339 pVCpu->iem.s.offCurInstrStart = 0;
1340 pVCpu->iem.s.cbInstrBuf = 0;
1341 pVCpu->iem.s.cbInstrBufTotal = 0;
1342 }
1343#else
1344 pVCpu->iem.s.cbOpcode = 0;
1345 pVCpu->iem.s.offOpcode = 0;
1346#endif
1347 pVCpu->iem.s.offModRm = 0;
1348 Assert(pVCpu->iem.s.cActiveMappings == 0);
1349 pVCpu->iem.s.iNextMapping = 0;
1350 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1351 Assert(pVCpu->iem.s.fBypassHandlers == false);
1352
1353#ifdef DBGFTRACE_ENABLED
1354 switch (enmMode)
1355 {
1356 case IEMMODE_64BIT:
1357 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1358 break;
1359 case IEMMODE_32BIT:
1360 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1361 break;
1362 case IEMMODE_16BIT:
1363 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1364 break;
1365 }
1366#endif
1367}
1368
1369
1370
1371/**
1372 * Prefetches opcodes the first time execution is started.
1373 *
1374 * @returns Strict VBox status code.
1375 * @param pVCpu The cross context virtual CPU structure of the
1376 * calling thread.
1377 * @param fBypassHandlers Whether to bypass access handlers.
1378 */
1379IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers)
1380{
1381 iemInitDecoder(pVCpu, fBypassHandlers);
1382
1383#ifdef IEM_WITH_CODE_TLB
1384 /** @todo Do ITLB lookup here. */
1385
1386#else /* !IEM_WITH_CODE_TLB */
1387
1388 /*
1389 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1390 *
1391 * First translate CS:rIP to a physical address.
1392 */
1393 uint32_t cbToTryRead;
1394 RTGCPTR GCPtrPC;
1395 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1396 {
1397 cbToTryRead = PAGE_SIZE;
1398 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1399 if (IEM_IS_CANONICAL(GCPtrPC))
1400 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1401 else
1402 return iemRaiseGeneralProtectionFault0(pVCpu);
1403 }
1404 else
1405 {
1406 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1407 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1408 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1409 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1410 else
1411 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1412 if (cbToTryRead) { /* likely */ }
1413 else /* overflowed */
1414 {
1415 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1416 cbToTryRead = UINT32_MAX;
1417 }
1418 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1419 Assert(GCPtrPC <= UINT32_MAX);
1420 }
1421
1422 RTGCPHYS GCPhys;
1423 uint64_t fFlags;
1424 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1425 if (RT_SUCCESS(rc)) { /* probable */ }
1426 else
1427 {
1428 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1429 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1430 }
1431 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1432 else
1433 {
1434 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1435 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1436 }
1437 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1438 else
1439 {
1440 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1441 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1442 }
1443 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1444 /** @todo Check reserved bits and such stuff. PGM is better at doing
1445 * that, so do it when implementing the guest virtual address
1446 * TLB... */
1447
1448 /*
1449 * Read the bytes at this address.
1450 */
1451 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1452 if (cbToTryRead > cbLeftOnPage)
1453 cbToTryRead = cbLeftOnPage;
1454 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1455 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1456
1457 if (!pVCpu->iem.s.fBypassHandlers)
1458 {
1459 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1460 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1461 { /* likely */ }
1462 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1463 {
1464 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1465                 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1466 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1467 }
1468 else
1469 {
1470 Log((RT_SUCCESS(rcStrict)
1471 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1472 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1473                 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1474 return rcStrict;
1475 }
1476 }
1477 else
1478 {
1479 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1480 if (RT_SUCCESS(rc))
1481 { /* likely */ }
1482 else
1483 {
1484 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1485                 GCPtrPC, GCPhys, cbToTryRead, rc));
1486 return rc;
1487 }
1488 }
1489 pVCpu->iem.s.cbOpcode = cbToTryRead;
1490#endif /* !IEM_WITH_CODE_TLB */
1491 return VINF_SUCCESS;
1492}
1493
1494
1495/**
1496 * Invalidates the IEM TLBs.
1497 *
1498 * This is called internally as well as by PGM when moving GC mappings.
1499 *
1501 * @param pVCpu The cross context virtual CPU structure of the calling
1502 * thread.
1503 * @param fVmm Set when PGM calls us with a remapping.
1504 */
1505VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu, bool fVmm)
1506{
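    /*
     * Invalidation is done lazily by bumping the TLB revision: the tag of each
     * entry includes the revision it was inserted under (see IEMTlbInvalidatePage),
     * so after the bump the old entries simply stop matching.  Only on the rare
     * revision wrap-around do we scrub all the tags explicitly.
     */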
1507#ifdef IEM_WITH_CODE_TLB
1508 pVCpu->iem.s.cbInstrBufTotal = 0;
1509 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1510 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1511 { /* very likely */ }
1512 else
1513 {
1514 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1515 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1516 while (i-- > 0)
1517 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1518 }
1519#endif
1520
1521#ifdef IEM_WITH_DATA_TLB
1522 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1523 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1524 { /* very likely */ }
1525 else
1526 {
1527 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1528 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1529 while (i-- > 0)
1530 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1531 }
1532#endif
1533 NOREF(pVCpu); NOREF(fVmm);
1534}
1535
1536
1537/**
1538 * Invalidates a page in the TLBs.
1539 *
1540 * @param pVCpu The cross context virtual CPU structure of the calling
1541 * thread.
1542 * @param GCPtr The address of the page to invalidate.
1543 */
1544VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1545{
1546#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
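    /* Both TLBs are direct mapped with 256 entries: the low 8 bits of the page
       number select the entry, and the stored tag is the page number combined
       with the TLB revision that was current when the entry was inserted. */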
1547 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1548 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1549 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1550 uintptr_t idx = (uint8_t)GCPtr;
1551
1552# ifdef IEM_WITH_CODE_TLB
1553 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1554 {
1555 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1556 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1557 pVCpu->iem.s.cbInstrBufTotal = 0;
1558 }
1559# endif
1560
1561# ifdef IEM_WITH_DATA_TLB
1562 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1563 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1564# endif
1565#else
1566 NOREF(pVCpu); NOREF(GCPtr);
1567#endif
1568}
1569
1570
1571/**
1572 * Invalidates the host physical aspects of the IEM TLBs.
1573 *
1574 * This is called internally as well as by PGM when moving GC mappings.
1575 *
1576 * @param pVCpu The cross context virtual CPU structure of the calling
1577 * thread.
1578 */
1579VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
1580{
1581#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1582    /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
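    /* The physical side uses the same lazy scheme as the virtual revision in
       IEMTlbInvalidateAll: bumping uTlbPhysRev makes the ring-3 mapping pointers
       and physical access flags stale so they get re-resolved on the next use;
       only on a wrap-around to zero are all the entries scrubbed explicitly. */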
1583
1584# ifdef IEM_WITH_CODE_TLB
1585 pVCpu->iem.s.cbInstrBufTotal = 0;
1586# endif
1587 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1588 if (uTlbPhysRev != 0)
1589 {
1590 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1591 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1592 }
1593 else
1594 {
1595 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1596 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1597
1598 unsigned i;
1599# ifdef IEM_WITH_CODE_TLB
1600 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1601 while (i-- > 0)
1602 {
1603 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1604 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1605 }
1606# endif
1607# ifdef IEM_WITH_DATA_TLB
1608 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1609 while (i-- > 0)
1610 {
1611 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1612 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1613 }
1614# endif
1615 }
1616#else
1617 NOREF(pVCpu);
1618#endif
1619}
1620
1621
1622/**
1623 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
1624 *
1625 * This is called internally as well as by PGM when moving GC mappings.
1626 *
1627 * @param pVM The cross context VM structure.
1628 *
1629 * @remarks Caller holds the PGM lock.
1630 */
1631VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1632{
1633 RT_NOREF_PV(pVM);
1634}
1635
1636#ifdef IEM_WITH_CODE_TLB
1637
1638/**
1639 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception and
1640 * longjmp'ing on failure.
1641 *
1642 * We end up here for a number of reasons:
1643 * - pbInstrBuf isn't yet initialized.
1644 * - Advancing beyond the buffer boundary (e.g. cross page).
1645 * - Advancing beyond the CS segment limit.
1646 * - Fetching from non-mappable page (e.g. MMIO).
1647 *
1648 * @param pVCpu The cross context virtual CPU structure of the
1649 * calling thread.
1650 * @param pvDst Where to return the bytes.
1651 * @param cbDst Number of bytes to read.
1652 *
1653 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1654 */
1655IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst)
1656{
1657#ifdef IN_RING3
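    /*
     * The overall flow: first consume whatever is still left in the current
     * instruction buffer, then look up the next page in the code TLB and either
     * switch pbInstrBuf to its ring-3 mapping or fall back to reading through
     * PGM, looping until all cbDst bytes have been delivered.
     */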
1658 for (;;)
1659 {
1660 Assert(cbDst <= 8);
1661 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1662
1663 /*
1664 * We might have a partial buffer match, deal with that first to make the
1665 * rest simpler. This is the first part of the cross page/buffer case.
1666 */
1667 if (pVCpu->iem.s.pbInstrBuf != NULL)
1668 {
1669 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1670 {
1671 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1672 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1673 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1674
1675 cbDst -= cbCopy;
1676 pvDst = (uint8_t *)pvDst + cbCopy;
1677 offBuf += cbCopy;
1678                pVCpu->iem.s.offInstrNextByte = offBuf;
1679 }
1680 }
1681
1682 /*
1683 * Check segment limit, figuring how much we're allowed to access at this point.
1684 *
1685 * We will fault immediately if RIP is past the segment limit / in non-canonical
1686 * territory. If we do continue, there are one or more bytes to read before we
1687 * end up in trouble and we need to do that first before faulting.
1688 */
1689 RTGCPTR GCPtrFirst;
1690 uint32_t cbMaxRead;
1691 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1692 {
1693 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1694 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1695 { /* likely */ }
1696 else
1697 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1698 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1699 }
1700 else
1701 {
1702 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1703 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1704 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1705 { /* likely */ }
1706 else
1707 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1708 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1709 if (cbMaxRead != 0)
1710 { /* likely */ }
1711 else
1712 {
1713 /* Overflowed because address is 0 and limit is max. */
1714 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1715 cbMaxRead = X86_PAGE_SIZE;
1716 }
1717 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1718 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1719 if (cbMaxRead2 < cbMaxRead)
1720 cbMaxRead = cbMaxRead2;
1721 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1722 }
1723
1724 /*
1725 * Get the TLB entry for this piece of code.
1726 */
1727 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1728 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1729 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1730 if (pTlbe->uTag == uTag)
1731 {
1732 /* likely when executing lots of code, otherwise unlikely */
1733# ifdef VBOX_WITH_STATISTICS
1734 pVCpu->iem.s.CodeTlb.cTlbHits++;
1735# endif
1736 }
1737 else
1738 {
1739 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1740 RTGCPHYS GCPhys;
1741 uint64_t fFlags;
1742 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1743 if (RT_FAILURE(rc))
1744 {
1745 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1746 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1747 }
1748
1749 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1750 pTlbe->uTag = uTag;
1751 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1752 pTlbe->GCPhys = GCPhys;
1753 pTlbe->pbMappingR3 = NULL;
1754 }
1755
1756 /*
1757 * Check TLB page table level access flags.
1758 */
1759 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1760 {
1761 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1762 {
1763 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1764 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1765 }
1766 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1767 {
1768 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1769 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1770 }
1771 }
1772
1773 /*
1774 * Look up the physical page info if necessary.
1775 */
1776 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1777 { /* not necessary */ }
1778 else
1779 {
1780 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1781 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1782 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1783 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1784 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1785 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1786 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1787 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1788 }
1789
1790# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1791 /*
1792         * Try to do a direct read using the pbMappingR3 pointer.
1793 */
1794 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1795 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1796 {
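            /* Note: the instruction buffer window is clamped so that at most 15
               bytes (the architectural maximum instruction length) are exposed
               beyond the start of the current instruction. */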
1797 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1798 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1799 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1800 {
1801 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1802 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1803 }
1804 else
1805 {
1806 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1807 Assert(cbInstr < cbMaxRead);
1808 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1809 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1810 }
1811 if (cbDst <= cbMaxRead)
1812 {
1813 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1814 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1815 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1816 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1817 return;
1818 }
1819 pVCpu->iem.s.pbInstrBuf = NULL;
1820
1821 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1822 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1823 }
1824 else
1825# endif
1826#if 0
1827 /*
1828         * If there is no special read handling, we can read a bit more and
1829 * put it in the prefetch buffer.
1830 */
1831 if ( cbDst < cbMaxRead
1832 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1833 {
1834 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1835 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1836 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1837 { /* likely */ }
1838 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1839 {
1840 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1841                      GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1842 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1843                 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1844 }
1845 else
1846 {
1847 Log((RT_SUCCESS(rcStrict)
1848 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1849 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1850                      GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1851 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1852 }
1853 }
1854 /*
1855 * Special read handling, so only read exactly what's needed.
1856 * This is a highly unlikely scenario.
1857 */
1858 else
1859#endif
1860 {
1861 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1862 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1863 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1864 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1865 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1866 { /* likely */ }
1867 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1868 {
1869 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1870                      GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1871 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1872 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1873 }
1874 else
1875 {
1876 Log((RT_SUCCESS(rcStrict)
1877 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1878 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1879                      GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1880 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1881 }
1882 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1883 if (cbToRead == cbDst)
1884 return;
1885 }
1886
1887 /*
1888 * More to read, loop.
1889 */
1890 cbDst -= cbMaxRead;
1891 pvDst = (uint8_t *)pvDst + cbMaxRead;
1892 }
1893#else
1894 RT_NOREF(pvDst, cbDst);
1895 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1896#endif
1897}
1898
1899#else
1900
1901/**
1902 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1903 * exception if it fails.
1904 *
1905 * @returns Strict VBox status code.
1906 * @param pVCpu The cross context virtual CPU structure of the
1907 * calling thread.
1908 * @param cbMin The minimum number of bytes relative to offOpcode
1909 * that must be read.
1910 */
1911IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin)
1912{
1913 /*
1914 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1915 *
1916 * First translate CS:rIP to a physical address.
1917 */
1918 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1919 uint32_t cbToTryRead;
1920 RTGCPTR GCPtrNext;
1921 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1922 {
1923 cbToTryRead = PAGE_SIZE;
1924 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
1925 if (!IEM_IS_CANONICAL(GCPtrNext))
1926 return iemRaiseGeneralProtectionFault0(pVCpu);
1927 }
1928 else
1929 {
1930 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1931 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1932 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1933 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1934 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1935 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1936 if (!cbToTryRead) /* overflowed */
1937 {
1938 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1939 cbToTryRead = UINT32_MAX;
1940 /** @todo check out wrapping around the code segment. */
1941 }
1942 if (cbToTryRead < cbMin - cbLeft)
1943 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1944 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1945 }
1946
1947 /* Only read up to the end of the page, and make sure we don't read more
1948 than the opcode buffer can hold. */
1949 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1950 if (cbToTryRead > cbLeftOnPage)
1951 cbToTryRead = cbLeftOnPage;
1952 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1953 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1954/** @todo r=bird: Convert assertion into undefined opcode exception? */
1955 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1956
1957 RTGCPHYS GCPhys;
1958 uint64_t fFlags;
1959 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1960 if (RT_FAILURE(rc))
1961 {
1962 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1963 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1964 }
1965 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1966 {
1967 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1968 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1969 }
1970 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1971 {
1972 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1973 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1974 }
1975 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1976 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1977 /** @todo Check reserved bits and such stuff. PGM is better at doing
1978 * that, so do it when implementing the guest virtual address
1979 * TLB... */
1980
1981 /*
1982 * Read the bytes at this address.
1983 *
1984 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1985 * and since PATM should only patch the start of an instruction there
1986 * should be no need to check again here.
1987 */
1988 if (!pVCpu->iem.s.fBypassHandlers)
1989 {
1990 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1991 cbToTryRead, PGMACCESSORIGIN_IEM);
1992 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1993 { /* likely */ }
1994 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1995 {
1996 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1997                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1998 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1999 }
2000 else
2001 {
2002 Log((RT_SUCCESS(rcStrict)
2003 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2004 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2005                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2006 return rcStrict;
2007 }
2008 }
2009 else
2010 {
2011 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2012 if (RT_SUCCESS(rc))
2013 { /* likely */ }
2014 else
2015 {
2016 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2017 return rc;
2018 }
2019 }
2020 pVCpu->iem.s.cbOpcode += cbToTryRead;
2021 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2022
2023 return VINF_SUCCESS;
2024}
2025
2026#endif /* !IEM_WITH_CODE_TLB */
2027#ifndef IEM_WITH_SETJMP
2028
2029/**
2030 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2031 *
2032 * @returns Strict VBox status code.
2033 * @param pVCpu The cross context virtual CPU structure of the
2034 * calling thread.
2035 * @param pb Where to return the opcode byte.
2036 */
2037DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb)
2038{
2039 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2040 if (rcStrict == VINF_SUCCESS)
2041 {
2042 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2043 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2044 pVCpu->iem.s.offOpcode = offOpcode + 1;
2045 }
2046 else
2047 *pb = 0;
2048 return rcStrict;
2049}
2050
2051
2052/**
2053 * Fetches the next opcode byte.
2054 *
2055 * @returns Strict VBox status code.
2056 * @param pVCpu The cross context virtual CPU structure of the
2057 * calling thread.
2058 * @param pu8 Where to return the opcode byte.
2059 */
2060DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8)
2061{
2062 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2063 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2064 {
2065 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2066 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2067 return VINF_SUCCESS;
2068 }
2069 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2070}
2071
2072#else /* IEM_WITH_SETJMP */
2073
2074/**
2075 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2076 *
2077 * @returns The opcode byte.
2078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2079 */
2080DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu)
2081{
2082# ifdef IEM_WITH_CODE_TLB
2083 uint8_t u8;
2084 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2085 return u8;
2086# else
2087 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2088 if (rcStrict == VINF_SUCCESS)
2089 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2090 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2091# endif
2092}
2093
2094
2095/**
2096 * Fetches the next opcode byte, longjmp on error.
2097 *
2098 * @returns The opcode byte.
2099 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2100 */
2101DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu)
2102{
2103# ifdef IEM_WITH_CODE_TLB
2104 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2105 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2106 if (RT_LIKELY( pbBuf != NULL
2107 && offBuf < pVCpu->iem.s.cbInstrBuf))
2108 {
2109 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2110 return pbBuf[offBuf];
2111 }
2112# else
2113 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2114 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2115 {
2116 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2117 return pVCpu->iem.s.abOpcode[offOpcode];
2118 }
2119# endif
2120 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2121}
2122
2123#endif /* IEM_WITH_SETJMP */
2124
2125/**
2126 * Fetches the next opcode byte, returns automatically on failure.
2127 *
2128 * @param a_pu8 Where to return the opcode byte.
2129 * @remark Implicitly references pVCpu.
2130 */
2131#ifndef IEM_WITH_SETJMP
2132# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2133 do \
2134 { \
2135 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2136 if (rcStrict2 == VINF_SUCCESS) \
2137 { /* likely */ } \
2138 else \
2139 return rcStrict2; \
2140 } while (0)
2141#else
2142# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2143#endif /* IEM_WITH_SETJMP */
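/* Usage sketch (illustrative only, not actual code from this file): since the
 * non-setjmp variant expands to a 'return' statement, the IEM_OPCODE_GET_NEXT_*
 * macros are meant for decoder functions that return a strict VBox status code:
 *
 *     uint8_t bOpcode;
 *     IEM_OPCODE_GET_NEXT_U8(&bOpcode);   // returns or longjmps on fetch failure
 *     ...decode bOpcode and fetch any further opcode/operand bytes the same way...
 */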
2144
2145
2146#ifndef IEM_WITH_SETJMP
2147/**
2148 * Fetches the next signed byte from the opcode stream.
2149 *
2150 * @returns Strict VBox status code.
2151 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2152 * @param pi8 Where to return the signed byte.
2153 */
2154DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8)
2155{
2156 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2157}
2158#endif /* !IEM_WITH_SETJMP */
2159
2160
2161/**
2162 * Fetches the next signed byte from the opcode stream, returning automatically
2163 * on failure.
2164 *
2165 * @param a_pi8 Where to return the signed byte.
2166 * @remark Implicitly references pVCpu.
2167 */
2168#ifndef IEM_WITH_SETJMP
2169# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2170 do \
2171 { \
2172 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2173 if (rcStrict2 != VINF_SUCCESS) \
2174 return rcStrict2; \
2175 } while (0)
2176#else /* IEM_WITH_SETJMP */
2177# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2178
2179#endif /* IEM_WITH_SETJMP */
2180
2181#ifndef IEM_WITH_SETJMP
2182
2183/**
2184 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2185 *
2186 * @returns Strict VBox status code.
2187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2188 * @param pu16 Where to return the opcode word.
2189 */
2190DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
2191{
2192 uint8_t u8;
2193 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2194 if (rcStrict == VINF_SUCCESS)
2195 *pu16 = (int8_t)u8;
2196 return rcStrict;
2197}
2198
2199
2200/**
2201 * Fetches the next signed byte from the opcode stream, extending it to
2202 * unsigned 16-bit.
2203 *
2204 * @returns Strict VBox status code.
2205 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2206 * @param pu16 Where to return the unsigned word.
2207 */
2208DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16)
2209{
2210 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2211 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2212 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2213
2214 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2215 pVCpu->iem.s.offOpcode = offOpcode + 1;
2216 return VINF_SUCCESS;
2217}
2218
2219#endif /* !IEM_WITH_SETJMP */
2220
2221/**
2222 * Fetches the next signed byte from the opcode stream, sign-extending it to
2223 * a word, returning automatically on failure.
2224 *
2225 * @param a_pu16 Where to return the word.
2226 * @remark Implicitly references pVCpu.
2227 */
2228#ifndef IEM_WITH_SETJMP
2229# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2230 do \
2231 { \
2232 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2233 if (rcStrict2 != VINF_SUCCESS) \
2234 return rcStrict2; \
2235 } while (0)
2236#else
2237# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2238#endif
2239
2240#ifndef IEM_WITH_SETJMP
2241
2242/**
2243 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2244 *
2245 * @returns Strict VBox status code.
2246 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2247 * @param pu32 Where to return the opcode dword.
2248 */
2249DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2250{
2251 uint8_t u8;
2252 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2253 if (rcStrict == VINF_SUCCESS)
2254 *pu32 = (int8_t)u8;
2255 return rcStrict;
2256}
2257
2258
2259/**
2260 * Fetches the next signed byte from the opcode stream, extending it to
2261 * unsigned 32-bit.
2262 *
2263 * @returns Strict VBox status code.
2264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2265 * @param pu32 Where to return the unsigned dword.
2266 */
2267DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32)
2268{
2269 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2270 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2271 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2272
2273 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2274 pVCpu->iem.s.offOpcode = offOpcode + 1;
2275 return VINF_SUCCESS;
2276}
2277
2278#endif /* !IEM_WITH_SETJMP */
2279
2280/**
2281 * Fetches the next signed byte from the opcode stream, sign-extending it to
2282 * a double word, returning automatically on failure.
2283 *
2284 * @param a_pu32 Where to return the double word.
2285 * @remark Implicitly references pVCpu.
2286 */
2287#ifndef IEM_WITH_SETJMP
2288# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2289 do \
2290 { \
2291 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2292 if (rcStrict2 != VINF_SUCCESS) \
2293 return rcStrict2; \
2294 } while (0)
2295#else
2296# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2297#endif
2298
2299#ifndef IEM_WITH_SETJMP
2300
2301/**
2302 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2303 *
2304 * @returns Strict VBox status code.
2305 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2306 * @param pu64 Where to return the opcode qword.
2307 */
2308DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2309{
2310 uint8_t u8;
2311 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2312 if (rcStrict == VINF_SUCCESS)
2313 *pu64 = (int8_t)u8;
2314 return rcStrict;
2315}
2316
2317
2318/**
2319 * Fetches the next signed byte from the opcode stream, extending it to
2320 * unsigned 64-bit.
2321 *
2322 * @returns Strict VBox status code.
2323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2324 * @param pu64 Where to return the unsigned qword.
2325 */
2326DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2327{
2328 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2329 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2330 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2331
2332 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2333 pVCpu->iem.s.offOpcode = offOpcode + 1;
2334 return VINF_SUCCESS;
2335}
2336
2337#endif /* !IEM_WITH_SETJMP */
2338
2339
2340/**
2341 * Fetches the next signed byte from the opcode stream, sign-extending it to
2342 * a quad word, returning automatically on failure.
2343 *
2344 * @param a_pu64 Where to return the quad word.
2345 * @remark Implicitly references pVCpu.
2346 */
2347#ifndef IEM_WITH_SETJMP
2348# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2349 do \
2350 { \
2351 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2352 if (rcStrict2 != VINF_SUCCESS) \
2353 return rcStrict2; \
2354 } while (0)
2355#else
2356# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2357#endif
2358
2359
2360#ifndef IEM_WITH_SETJMP
2361/**
2362 * Fetches the next opcode byte, which is a ModR/M byte, noting down its offset.
2363 *
2364 * @returns Strict VBox status code.
2365 * @param pVCpu The cross context virtual CPU structure of the
2366 * calling thread.
2367 * @param pu8 Where to return the opcode byte.
2368 */
2369DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPUCC pVCpu, uint8_t *pu8)
2370{
2371 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2372 pVCpu->iem.s.offModRm = offOpcode;
2373 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2374 {
2375 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2376 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2377 return VINF_SUCCESS;
2378 }
2379 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2380}
2381#else /* IEM_WITH_SETJMP */
2382/**
2383 * Fetches the next opcode byte (a ModR/M byte), noting down its offset, longjmp on error.
2384 *
2385 * @returns The opcode byte.
2386 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2387 */
2388DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPUCC pVCpu)
2389{
2390# ifdef IEM_WITH_CODE_TLB
2391 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2392 pVCpu->iem.s.offModRm = offBuf;
2393 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2394 if (RT_LIKELY( pbBuf != NULL
2395 && offBuf < pVCpu->iem.s.cbInstrBuf))
2396 {
2397 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2398 return pbBuf[offBuf];
2399 }
2400# else
2401 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2402 pVCpu->iem.s.offModRm = offOpcode;
2403 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2404 {
2405 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2406 return pVCpu->iem.s.abOpcode[offOpcode];
2407 }
2408# endif
2409 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2410}
2411#endif /* IEM_WITH_SETJMP */
2412
2413/**
2414 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2415 * on failure.
2416 *
2417 * Will note down the position of the ModR/M byte for VT-x exits.
2418 *
2419 * @param a_pbRm Where to return the RM opcode byte.
2420 * @remark Implicitly references pVCpu.
2421 */
2422#ifndef IEM_WITH_SETJMP
2423# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2424 do \
2425 { \
2426 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2427 if (rcStrict2 == VINF_SUCCESS) \
2428 { /* likely */ } \
2429 else \
2430 return rcStrict2; \
2431 } while (0)
2432#else
2433# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2434#endif /* IEM_WITH_SETJMP */
2435
2436
2437#ifndef IEM_WITH_SETJMP
2438
2439/**
2440 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2441 *
2442 * @returns Strict VBox status code.
2443 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2444 * @param pu16 Where to return the opcode word.
2445 */
2446DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
2447{
2448 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2449 if (rcStrict == VINF_SUCCESS)
2450 {
2451 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2452# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2453 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2454# else
2455 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2456# endif
2457 pVCpu->iem.s.offOpcode = offOpcode + 2;
2458 }
2459 else
2460 *pu16 = 0;
2461 return rcStrict;
2462}
2463
2464
2465/**
2466 * Fetches the next opcode word.
2467 *
2468 * @returns Strict VBox status code.
2469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2470 * @param pu16 Where to return the opcode word.
2471 */
2472DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16)
2473{
2474 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2475 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2476 {
2477 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2478# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2479 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2480# else
2481 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2482# endif
2483 return VINF_SUCCESS;
2484 }
2485 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2486}
2487
2488#else /* IEM_WITH_SETJMP */
2489
2490/**
2491 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2492 *
2493 * @returns The opcode word.
2494 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2495 */
2496DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu)
2497{
2498# ifdef IEM_WITH_CODE_TLB
2499 uint16_t u16;
2500 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2501 return u16;
2502# else
2503 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2504 if (rcStrict == VINF_SUCCESS)
2505 {
2506 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2507 pVCpu->iem.s.offOpcode += 2;
2508# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2509 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2510# else
2511 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2512# endif
2513 }
2514 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2515# endif
2516}
2517
2518
2519/**
2520 * Fetches the next opcode word, longjmp on error.
2521 *
2522 * @returns The opcode word.
2523 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2524 */
2525DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu)
2526{
2527# ifdef IEM_WITH_CODE_TLB
2528 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2529 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2530 if (RT_LIKELY( pbBuf != NULL
2531 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2532 {
2533 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2534# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2535 return *(uint16_t const *)&pbBuf[offBuf];
2536# else
2537 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2538# endif
2539 }
2540# else
2541 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2542 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2543 {
2544 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2545# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2546 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2547# else
2548 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2549# endif
2550 }
2551# endif
2552 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2553}
2554
2555#endif /* IEM_WITH_SETJMP */
2556
2557
2558/**
2559 * Fetches the next opcode word, returns automatically on failure.
2560 *
2561 * @param a_pu16 Where to return the opcode word.
2562 * @remark Implicitly references pVCpu.
2563 */
2564#ifndef IEM_WITH_SETJMP
2565# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2566 do \
2567 { \
2568 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2569 if (rcStrict2 != VINF_SUCCESS) \
2570 return rcStrict2; \
2571 } while (0)
2572#else
2573# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2574#endif
2575
2576#ifndef IEM_WITH_SETJMP
2577
2578/**
2579 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2580 *
2581 * @returns Strict VBox status code.
2582 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2583 * @param pu32 Where to return the opcode double word.
2584 */
2585DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2586{
2587 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2588 if (rcStrict == VINF_SUCCESS)
2589 {
2590 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2591 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2592 pVCpu->iem.s.offOpcode = offOpcode + 2;
2593 }
2594 else
2595 *pu32 = 0;
2596 return rcStrict;
2597}
2598
2599
2600/**
2601 * Fetches the next opcode word, zero extending it to a double word.
2602 *
2603 * @returns Strict VBox status code.
2604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2605 * @param pu32 Where to return the opcode double word.
2606 */
2607DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32)
2608{
2609 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2610 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2611 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2612
2613 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2614 pVCpu->iem.s.offOpcode = offOpcode + 2;
2615 return VINF_SUCCESS;
2616}
2617
2618#endif /* !IEM_WITH_SETJMP */
2619
2620
2621/**
2622 * Fetches the next opcode word and zero extends it to a double word, returns
2623 * automatically on failure.
2624 *
2625 * @param a_pu32 Where to return the opcode double word.
2626 * @remark Implicitly references pVCpu.
2627 */
2628#ifndef IEM_WITH_SETJMP
2629# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2630 do \
2631 { \
2632 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2633 if (rcStrict2 != VINF_SUCCESS) \
2634 return rcStrict2; \
2635 } while (0)
2636#else
2637# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2638#endif
2639
2640#ifndef IEM_WITH_SETJMP
2641
2642/**
2643 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2644 *
2645 * @returns Strict VBox status code.
2646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2647 * @param pu64 Where to return the opcode quad word.
2648 */
2649DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2650{
2651 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2652 if (rcStrict == VINF_SUCCESS)
2653 {
2654 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2655 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2656 pVCpu->iem.s.offOpcode = offOpcode + 2;
2657 }
2658 else
2659 *pu64 = 0;
2660 return rcStrict;
2661}
2662
2663
2664/**
2665 * Fetches the next opcode word, zero extending it to a quad word.
2666 *
2667 * @returns Strict VBox status code.
2668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2669 * @param pu64 Where to return the opcode quad word.
2670 */
2671DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2672{
2673 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2674 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2675 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2676
2677 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2678 pVCpu->iem.s.offOpcode = offOpcode + 2;
2679 return VINF_SUCCESS;
2680}
2681
2682#endif /* !IEM_WITH_SETJMP */
2683
2684/**
2685 * Fetches the next opcode word and zero extends it to a quad word, returns
2686 * automatically on failure.
2687 *
2688 * @param a_pu64 Where to return the opcode quad word.
2689 * @remark Implicitly references pVCpu.
2690 */
2691#ifndef IEM_WITH_SETJMP
2692# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2693 do \
2694 { \
2695 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2696 if (rcStrict2 != VINF_SUCCESS) \
2697 return rcStrict2; \
2698 } while (0)
2699#else
2700# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2701#endif
2702
2703
2704#ifndef IEM_WITH_SETJMP
2705/**
2706 * Fetches the next signed word from the opcode stream.
2707 *
2708 * @returns Strict VBox status code.
2709 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2710 * @param pi16 Where to return the signed word.
2711 */
2712DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16)
2713{
2714 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2715}
2716#endif /* !IEM_WITH_SETJMP */
2717
2718
2719/**
2720 * Fetches the next signed word from the opcode stream, returning automatically
2721 * on failure.
2722 *
2723 * @param a_pi16 Where to return the signed word.
2724 * @remark Implicitly references pVCpu.
2725 */
2726#ifndef IEM_WITH_SETJMP
2727# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2728 do \
2729 { \
2730 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2731 if (rcStrict2 != VINF_SUCCESS) \
2732 return rcStrict2; \
2733 } while (0)
2734#else
2735# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2736#endif
2737
2738#ifndef IEM_WITH_SETJMP
2739
2740/**
2741 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2742 *
2743 * @returns Strict VBox status code.
2744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2745 * @param pu32 Where to return the opcode dword.
2746 */
2747DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2748{
2749 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2750 if (rcStrict == VINF_SUCCESS)
2751 {
2752 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2753# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2754 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2755# else
2756 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2757 pVCpu->iem.s.abOpcode[offOpcode + 1],
2758 pVCpu->iem.s.abOpcode[offOpcode + 2],
2759 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2760# endif
2761 pVCpu->iem.s.offOpcode = offOpcode + 4;
2762 }
2763 else
2764 *pu32 = 0;
2765 return rcStrict;
2766}
2767
2768
2769/**
2770 * Fetches the next opcode dword.
2771 *
2772 * @returns Strict VBox status code.
2773 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2774 * @param pu32 Where to return the opcode double word.
2775 */
2776DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32)
2777{
2778 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2779 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2780 {
2781 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2782# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2783 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2784# else
2785 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2786 pVCpu->iem.s.abOpcode[offOpcode + 1],
2787 pVCpu->iem.s.abOpcode[offOpcode + 2],
2788 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2789# endif
2790 return VINF_SUCCESS;
2791 }
2792 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2793}
2794
2795#else /* IEM_WITH_SETJMP */
2796
2797/**
2798 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2799 *
2800 * @returns The opcode dword.
2801 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2802 */
2803DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu)
2804{
2805# ifdef IEM_WITH_CODE_TLB
2806 uint32_t u32;
2807 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2808 return u32;
2809# else
2810 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2811 if (rcStrict == VINF_SUCCESS)
2812 {
2813 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2814 pVCpu->iem.s.offOpcode = offOpcode + 4;
2815# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2816 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2817# else
2818 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2819 pVCpu->iem.s.abOpcode[offOpcode + 1],
2820 pVCpu->iem.s.abOpcode[offOpcode + 2],
2821 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2822# endif
2823 }
2824 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2825# endif
2826}
2827
2828
2829/**
2830 * Fetches the next opcode dword, longjmp on error.
2831 *
2832 * @returns The opcode dword.
2833 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2834 */
2835DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu)
2836{
2837# ifdef IEM_WITH_CODE_TLB
2838 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2839 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2840 if (RT_LIKELY( pbBuf != NULL
2841 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2842 {
2843 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2844# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2845 return *(uint32_t const *)&pbBuf[offBuf];
2846# else
2847 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2848 pbBuf[offBuf + 1],
2849 pbBuf[offBuf + 2],
2850 pbBuf[offBuf + 3]);
2851# endif
2852 }
2853# else
2854 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2855 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2856 {
2857 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2858# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2859 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2860# else
2861 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2862 pVCpu->iem.s.abOpcode[offOpcode + 1],
2863 pVCpu->iem.s.abOpcode[offOpcode + 2],
2864 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2865# endif
2866 }
2867# endif
2868 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2869}
2870
2871#endif /* IEM_WITH_SETJMP */
2872
2873
2874/**
2875 * Fetches the next opcode dword, returns automatically on failure.
2876 *
2877 * @param a_pu32 Where to return the opcode dword.
2878 * @remark Implicitly references pVCpu.
2879 */
2880#ifndef IEM_WITH_SETJMP
2881# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2882 do \
2883 { \
2884 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2885 if (rcStrict2 != VINF_SUCCESS) \
2886 return rcStrict2; \
2887 } while (0)
2888#else
2889# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2890#endif
2891
2892#ifndef IEM_WITH_SETJMP
2893
2894/**
2895 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2896 *
2897 * @returns Strict VBox status code.
2898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2899 * @param pu64 Where to return the opcode dword.
2900 */
2901DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2902{
2903 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2904 if (rcStrict == VINF_SUCCESS)
2905 {
2906 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2907 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2908 pVCpu->iem.s.abOpcode[offOpcode + 1],
2909 pVCpu->iem.s.abOpcode[offOpcode + 2],
2910 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2911 pVCpu->iem.s.offOpcode = offOpcode + 4;
2912 }
2913 else
2914 *pu64 = 0;
2915 return rcStrict;
2916}
2917
2918
2919/**
2920 * Fetches the next opcode dword, zero extending it to a quad word.
2921 *
2922 * @returns Strict VBox status code.
2923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2924 * @param pu64 Where to return the opcode quad word.
2925 */
2926DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2927{
2928 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2929 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2930 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2931
2932 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2933 pVCpu->iem.s.abOpcode[offOpcode + 1],
2934 pVCpu->iem.s.abOpcode[offOpcode + 2],
2935 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2936 pVCpu->iem.s.offOpcode = offOpcode + 4;
2937 return VINF_SUCCESS;
2938}
2939
2940#endif /* !IEM_WITH_SETJMP */
2941
2942
2943/**
2944 * Fetches the next opcode dword and zero extends it to a quad word, returns
2945 * automatically on failure.
2946 *
2947 * @param a_pu64 Where to return the opcode quad word.
2948 * @remark Implicitly references pVCpu.
2949 */
2950#ifndef IEM_WITH_SETJMP
2951# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2952 do \
2953 { \
2954 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2955 if (rcStrict2 != VINF_SUCCESS) \
2956 return rcStrict2; \
2957 } while (0)
2958#else
2959# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2960#endif
2961
2962
2963#ifndef IEM_WITH_SETJMP
2964/**
2965 * Fetches the next signed double word from the opcode stream.
2966 *
2967 * @returns Strict VBox status code.
2968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2969 * @param pi32 Where to return the signed double word.
2970 */
2971DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32)
2972{
2973 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2974}
2975#endif
2976
2977/**
2978 * Fetches the next signed double word from the opcode stream, returning
2979 * automatically on failure.
2980 *
2981 * @param a_pi32 Where to return the signed double word.
2982 * @remark Implicitly references pVCpu.
2983 */
2984#ifndef IEM_WITH_SETJMP
2985# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2986 do \
2987 { \
2988 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2989 if (rcStrict2 != VINF_SUCCESS) \
2990 return rcStrict2; \
2991 } while (0)
2992#else
2993# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2994#endif
2995
2996#ifndef IEM_WITH_SETJMP
2997
2998/**
2999 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3000 *
3001 * @returns Strict VBox status code.
3002 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3003 * @param pu64 Where to return the opcode qword.
3004 */
3005DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3006{
3007 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3008 if (rcStrict == VINF_SUCCESS)
3009 {
3010 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3011 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3012 pVCpu->iem.s.abOpcode[offOpcode + 1],
3013 pVCpu->iem.s.abOpcode[offOpcode + 2],
3014 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3015 pVCpu->iem.s.offOpcode = offOpcode + 4;
3016 }
3017 else
3018 *pu64 = 0;
3019 return rcStrict;
3020}
3021
3022
3023/**
3024 * Fetches the next opcode dword, sign extending it into a quad word.
3025 *
3026 * @returns Strict VBox status code.
3027 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3028 * @param pu64 Where to return the opcode quad word.
3029 */
3030DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
3031{
3032 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3033 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3034 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3035
3036 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3037 pVCpu->iem.s.abOpcode[offOpcode + 1],
3038 pVCpu->iem.s.abOpcode[offOpcode + 2],
3039 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3040 *pu64 = i32;
3041 pVCpu->iem.s.offOpcode = offOpcode + 4;
3042 return VINF_SUCCESS;
3043}
3044
3045#endif /* !IEM_WITH_SETJMP */
3046
3047
3048/**
3049 * Fetches the next opcode double word and sign extends it to a quad word,
3050 * returns automatically on failure.
3051 *
3052 * @param a_pu64 Where to return the opcode quad word.
3053 * @remark Implicitly references pVCpu.
3054 */
3055#ifndef IEM_WITH_SETJMP
3056# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3057 do \
3058 { \
3059 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3060 if (rcStrict2 != VINF_SUCCESS) \
3061 return rcStrict2; \
3062 } while (0)
3063#else
3064# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3065#endif
3066
3067#ifndef IEM_WITH_SETJMP
3068
3069/**
3070 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3071 *
3072 * @returns Strict VBox status code.
3073 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3074 * @param pu64 Where to return the opcode qword.
3075 */
3076DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3077{
3078 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3079 if (rcStrict == VINF_SUCCESS)
3080 {
3081 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3082# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3083 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3084# else
3085 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3086 pVCpu->iem.s.abOpcode[offOpcode + 1],
3087 pVCpu->iem.s.abOpcode[offOpcode + 2],
3088 pVCpu->iem.s.abOpcode[offOpcode + 3],
3089 pVCpu->iem.s.abOpcode[offOpcode + 4],
3090 pVCpu->iem.s.abOpcode[offOpcode + 5],
3091 pVCpu->iem.s.abOpcode[offOpcode + 6],
3092 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3093# endif
3094 pVCpu->iem.s.offOpcode = offOpcode + 8;
3095 }
3096 else
3097 *pu64 = 0;
3098 return rcStrict;
3099}
3100
3101
3102/**
3103 * Fetches the next opcode qword.
3104 *
3105 * @returns Strict VBox status code.
3106 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3107 * @param pu64 Where to return the opcode qword.
3108 */
3109DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64)
3110{
3111 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3112 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3113 {
3114# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3115 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3116# else
3117 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3118 pVCpu->iem.s.abOpcode[offOpcode + 1],
3119 pVCpu->iem.s.abOpcode[offOpcode + 2],
3120 pVCpu->iem.s.abOpcode[offOpcode + 3],
3121 pVCpu->iem.s.abOpcode[offOpcode + 4],
3122 pVCpu->iem.s.abOpcode[offOpcode + 5],
3123 pVCpu->iem.s.abOpcode[offOpcode + 6],
3124 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3125# endif
3126 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3127 return VINF_SUCCESS;
3128 }
3129 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3130}
3131
3132#else /* IEM_WITH_SETJMP */
3133
3134/**
3135 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3136 *
3137 * @returns The opcode qword.
3138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3139 */
3140DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu)
3141{
3142# ifdef IEM_WITH_CODE_TLB
3143 uint64_t u64;
3144 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3145 return u64;
3146# else
3147 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3148 if (rcStrict == VINF_SUCCESS)
3149 {
3150 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3151 pVCpu->iem.s.offOpcode = offOpcode + 8;
3152# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3153 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3154# else
3155 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3156 pVCpu->iem.s.abOpcode[offOpcode + 1],
3157 pVCpu->iem.s.abOpcode[offOpcode + 2],
3158 pVCpu->iem.s.abOpcode[offOpcode + 3],
3159 pVCpu->iem.s.abOpcode[offOpcode + 4],
3160 pVCpu->iem.s.abOpcode[offOpcode + 5],
3161 pVCpu->iem.s.abOpcode[offOpcode + 6],
3162 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3163# endif
3164 }
3165 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3166# endif
3167}
3168
3169
3170/**
3171 * Fetches the next opcode qword, longjmp on error.
3172 *
3173 * @returns The opcode qword.
3174 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3175 */
3176DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu)
3177{
3178# ifdef IEM_WITH_CODE_TLB
3179 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3180 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3181 if (RT_LIKELY( pbBuf != NULL
3182 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3183 {
3184 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3185# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3186 return *(uint64_t const *)&pbBuf[offBuf];
3187# else
3188 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3189 pbBuf[offBuf + 1],
3190 pbBuf[offBuf + 2],
3191 pbBuf[offBuf + 3],
3192 pbBuf[offBuf + 4],
3193 pbBuf[offBuf + 5],
3194 pbBuf[offBuf + 6],
3195 pbBuf[offBuf + 7]);
3196# endif
3197 }
3198# else
3199 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3200 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3201 {
3202 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3203# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3204 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3205# else
3206 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3207 pVCpu->iem.s.abOpcode[offOpcode + 1],
3208 pVCpu->iem.s.abOpcode[offOpcode + 2],
3209 pVCpu->iem.s.abOpcode[offOpcode + 3],
3210 pVCpu->iem.s.abOpcode[offOpcode + 4],
3211 pVCpu->iem.s.abOpcode[offOpcode + 5],
3212 pVCpu->iem.s.abOpcode[offOpcode + 6],
3213 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3214# endif
3215 }
3216# endif
3217 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3218}
3219
3220#endif /* IEM_WITH_SETJMP */
3221
3222/**
3223 * Fetches the next opcode quad word, returns automatically on failure.
3224 *
3225 * @param a_pu64 Where to return the opcode quad word.
3226 * @remark Implicitly references pVCpu.
3227 */
3228#ifndef IEM_WITH_SETJMP
3229# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3230 do \
3231 { \
3232 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3233 if (rcStrict2 != VINF_SUCCESS) \
3234 return rcStrict2; \
3235 } while (0)
3236#else
3237# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3238#endif
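
/*
 * Usage sketch (illustrative only; the helper name is hypothetical): an
 * instruction implementation typically pulls its immediate through these
 * macros, relying on the implicit pVCpu reference and on the macro either
 * returning the strict status code or longjmp'ing on a fetch failure.
 *
 *     IEM_STATIC VBOXSTRICTRC iemOpExampleFetchImm64(PVMCPUCC pVCpu)
 *     {
 *         uint64_t u64Imm = 0;
 *         IEM_OPCODE_GET_NEXT_U64(&u64Imm); // bails out / longjmps on failure
 *         return VINF_SUCCESS;
 *     }
 */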
3239
3240
3241/** @name Misc Worker Functions.
3242 * @{
3243 */
3244
3245/**
3246 * Gets the exception class for the specified exception vector.
3247 *
3248 * @returns The class of the specified exception.
3249 * @param uVector The exception vector.
3250 */
3251IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3252{
3253 Assert(uVector <= X86_XCPT_LAST);
3254 switch (uVector)
3255 {
3256 case X86_XCPT_DE:
3257 case X86_XCPT_TS:
3258 case X86_XCPT_NP:
3259 case X86_XCPT_SS:
3260 case X86_XCPT_GP:
3261 case X86_XCPT_SX: /* AMD only */
3262 return IEMXCPTCLASS_CONTRIBUTORY;
3263
3264 case X86_XCPT_PF:
3265 case X86_XCPT_VE: /* Intel only */
3266 return IEMXCPTCLASS_PAGE_FAULT;
3267
3268 case X86_XCPT_DF:
3269 return IEMXCPTCLASS_DOUBLE_FAULT;
3270 }
3271 return IEMXCPTCLASS_BENIGN;
3272}
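
/*
 * Classification sketch (illustrative): the mapping only matters for the
 * double/triple fault escalation rules used below, e.g.:
 *
 *     Assert(iemGetXcptClass(X86_XCPT_GP) == IEMXCPTCLASS_CONTRIBUTORY);
 *     Assert(iemGetXcptClass(X86_XCPT_PF) == IEMXCPTCLASS_PAGE_FAULT);
 *     Assert(iemGetXcptClass(X86_XCPT_UD) == IEMXCPTCLASS_BENIGN);
 */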
3273
3274
3275/**
3276 * Evaluates how to handle an exception caused during delivery of another event
3277 * (exception / interrupt).
3278 *
3279 * @returns How to handle the recursive exception.
3280 * @param pVCpu The cross context virtual CPU structure of the
3281 * calling thread.
3282 * @param fPrevFlags The flags of the previous event.
3283 * @param uPrevVector The vector of the previous event.
3284 * @param fCurFlags The flags of the current exception.
3285 * @param uCurVector The vector of the current exception.
3286 * @param pfXcptRaiseInfo Where to store additional information about the
3287 * exception condition. Optional.
3288 */
3289VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3290 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3291{
3292 /*
3293 * Only CPU exceptions can be raised while delivering other events; software interrupt
3294 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3295 */
3296 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3297 Assert(pVCpu); RT_NOREF(pVCpu);
3298 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3299
3300 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3301 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3302 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3303 {
3304 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3305 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3306 {
3307 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3308 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3309 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3310 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3311 {
3312 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3313 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3314 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3315 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3316 uCurVector, pVCpu->cpum.GstCtx.cr2));
3317 }
3318 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3319 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3320 {
3321 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3322 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3323 }
3324 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3325 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3326 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3327 {
3328 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3329 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3330 }
3331 }
3332 else
3333 {
3334 if (uPrevVector == X86_XCPT_NMI)
3335 {
3336 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3337 if (uCurVector == X86_XCPT_PF)
3338 {
3339 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3340 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3341 }
3342 }
3343 else if ( uPrevVector == X86_XCPT_AC
3344 && uCurVector == X86_XCPT_AC)
3345 {
3346 enmRaise = IEMXCPTRAISE_CPU_HANG;
3347 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3348 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3349 }
3350 }
3351 }
3352 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3353 {
3354 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3355 if (uCurVector == X86_XCPT_PF)
3356 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3357 }
3358 else
3359 {
3360 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3361 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3362 }
3363
3364 if (pfXcptRaiseInfo)
3365 *pfXcptRaiseInfo = fRaiseInfo;
3366 return enmRaise;
3367}
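
/*
 * Worked example (illustrative sketch, not actual caller code): a #GP raised
 * while delivering an earlier #GP is contributory-on-contributory and thus
 * escalates to a double fault, whereas a #PF raised while delivering a #GP is
 * simply handled as the current exception:
 *
 *     IEMXCPTRAISEINFO fInfo;
 *     IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP, &fInfo);
 *     Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
 */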
3368
3369
3370/**
3371 * Enters the CPU shutdown state initiated by a triple fault or other
3372 * unrecoverable conditions.
3373 *
3374 * @returns Strict VBox status code.
3375 * @param pVCpu The cross context virtual CPU structure of the
3376 * calling thread.
3377 */
3378IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu)
3379{
3380 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3381 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
3382
3383 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3384 {
3385 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3386 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3387 }
3388
3389 RT_NOREF(pVCpu);
3390 return VINF_EM_TRIPLE_FAULT;
3391}
3392
3393
3394/**
3395 * Validates a new SS segment.
3396 *
3397 * @returns VBox strict status code.
3398 * @param pVCpu The cross context virtual CPU structure of the
3399 * calling thread.
3400 * @param NewSS The new SS selector.
3401 * @param uCpl The CPL to load the stack for.
3402 * @param pDesc Where to return the descriptor.
3403 */
3404IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3405{
3406 /* Null selectors are not allowed (we're not called for dispatching
3407 interrupts with SS=0 in long mode). */
3408 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3409 {
3410 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3411 return iemRaiseTaskSwitchFault0(pVCpu);
3412 }
3413
3414 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3415 if ((NewSS & X86_SEL_RPL) != uCpl)
3416 {
3417 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3418 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3419 }
3420
3421 /*
3422 * Read the descriptor.
3423 */
3424 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3425 if (rcStrict != VINF_SUCCESS)
3426 return rcStrict;
3427
3428 /*
3429 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3430 */
3431 if (!pDesc->Legacy.Gen.u1DescType)
3432 {
3433 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3434 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3435 }
3436
3437 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3438 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3439 {
3440 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3441 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3442 }
3443 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3444 {
3445 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3446 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3447 }
3448
3449 /* Is it there? */
3450 /** @todo testcase: Is this checked before the canonical / limit check below? */
3451 if (!pDesc->Legacy.Gen.u1Present)
3452 {
3453 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3454 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3455 }
3456
3457 return VINF_SUCCESS;
3458}
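
/*
 * Illustrative outcomes (sketch; the selector values are made up): with
 * uCpl=3, NewSS=0x0000 raises #TS(0), NewSS=0x0010 (RPL=0) fails the
 * RPL == CPL check, and a present, writable data descriptor with DPL=3
 * referenced by NewSS=0x0023 passes all checks and is returned in *pDesc
 * with VINF_SUCCESS.
 */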
3459
3460
3461/**
3462 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3463 * not (kind of obsolete now).
3464 *
3465 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3466 */
3467#define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3468
3469/**
3470 * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
3471 *
3472 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3473 * @param a_fEfl The new EFLAGS.
3474 */
3475#define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
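
/*
 * Typical get/modify/set pattern (illustrative; mirrors what the exception
 * delivery code below does):
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
 *     fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
 *     IEMMISC_SET_EFL(pVCpu, fEfl);
 */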
3476
3477/** @} */
3478
3479
3480/** @name Raising Exceptions.
3481 *
3482 * @{
3483 */
3484
3485
3486/**
3487 * Loads the specified stack far pointer from the TSS.
3488 *
3489 * @returns VBox strict status code.
3490 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3491 * @param uCpl The CPL to load the stack for.
3492 * @param pSelSS Where to return the new stack segment.
3493 * @param puEsp Where to return the new stack pointer.
3494 */
3495IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3496{
3497 VBOXSTRICTRC rcStrict;
3498 Assert(uCpl < 4);
3499
3500 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3501 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3502 {
3503 /*
3504 * 16-bit TSS (X86TSS16).
3505 */
3506 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3507 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3508 {
3509 uint32_t off = uCpl * 4 + 2;
3510 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3511 {
3512 /** @todo check actual access pattern here. */
3513 uint32_t u32Tmp = 0; /* gcc maybe... */
3514 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3515 if (rcStrict == VINF_SUCCESS)
3516 {
3517 *puEsp = RT_LOWORD(u32Tmp);
3518 *pSelSS = RT_HIWORD(u32Tmp);
3519 return VINF_SUCCESS;
3520 }
3521 }
3522 else
3523 {
3524 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3525 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3526 }
3527 break;
3528 }
3529
3530 /*
3531 * 32-bit TSS (X86TSS32).
3532 */
3533 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3534 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3535 {
3536 uint32_t off = uCpl * 8 + 4;
3537 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3538 {
3539/** @todo check actual access pattern here. */
3540 uint64_t u64Tmp;
3541 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3542 if (rcStrict == VINF_SUCCESS)
3543 {
3544 *puEsp = u64Tmp & UINT32_MAX;
3545 *pSelSS = (RTSEL)(u64Tmp >> 32);
3546 return VINF_SUCCESS;
3547 }
3548 }
3549 else
3550 {
3551 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3552 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3553 }
3554 break;
3555 }
3556
3557 default:
3558 AssertFailed();
3559 rcStrict = VERR_IEM_IPE_4;
3560 break;
3561 }
3562
3563 *puEsp = 0; /* make gcc happy */
3564 *pSelSS = 0; /* make gcc happy */
3565 return rcStrict;
3566}
3567
3568
3569/**
3570 * Loads the specified stack pointer from the 64-bit TSS.
3571 *
3572 * @returns VBox strict status code.
3573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3574 * @param uCpl The CPL to load the stack for.
3575 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3576 * @param puRsp Where to return the new stack pointer.
3577 */
3578IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3579{
3580 Assert(uCpl < 4);
3581 Assert(uIst < 8);
3582 *puRsp = 0; /* make gcc happy */
3583
3584 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3585 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3586
3587 uint32_t off;
3588 if (uIst)
3589 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3590 else
3591 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3592 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3593 {
3594 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3595 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3596 }
3597
3598 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3599}
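
/*
 * Offset sketch (assuming the canonical 64-bit TSS layout with rsp0 at byte
 * offset 4 and ist1 at byte offset 36): uIst=0 with uCpl=1 reads the qword at
 * TSS base + 4 + 1*8 = offset 12 (RSP1), while uIst=3 reads the qword at
 * TSS base + 36 + (3-1)*8 = offset 52 (IST3) regardless of uCpl.
 */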
3600
3601
3602/**
3603 * Adjusts the CPU state according to the exception being raised.
3604 *
3605 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3606 * @param u8Vector The exception that has been raised.
3607 */
3608DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
3609{
3610 switch (u8Vector)
3611 {
3612 case X86_XCPT_DB:
3613 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3614 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3615 break;
3616 /** @todo Read the AMD and Intel exception reference... */
3617 }
3618}
3619
3620
3621/**
3622 * Implements exceptions and interrupts for real mode.
3623 *
3624 * @returns VBox strict status code.
3625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3626 * @param cbInstr The number of bytes to offset rIP by in the return
3627 * address.
3628 * @param u8Vector The interrupt / exception vector number.
3629 * @param fFlags The flags.
3630 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3631 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3632 */
3633IEM_STATIC VBOXSTRICTRC
3634iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
3635 uint8_t cbInstr,
3636 uint8_t u8Vector,
3637 uint32_t fFlags,
3638 uint16_t uErr,
3639 uint64_t uCr2)
3640{
3641 NOREF(uErr); NOREF(uCr2);
3642 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3643
3644 /*
3645 * Read the IDT entry.
3646 */
3647 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3648 {
3649 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3650 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3651 }
3652 RTFAR16 Idte;
3653 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3654 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3655 {
3656 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3657 return rcStrict;
3658 }
3659
3660 /*
3661 * Push the stack frame.
3662 */
3663 uint16_t *pu16Frame;
3664 uint64_t uNewRsp;
3665 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3666 if (rcStrict != VINF_SUCCESS)
3667 return rcStrict;
3668
3669 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3670#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3671 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3672 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3673 fEfl |= UINT16_C(0xf000);
3674#endif
3675 pu16Frame[2] = (uint16_t)fEfl;
3676 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3677 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3678 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3679 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3680 return rcStrict;
3681
3682 /*
3683 * Load the vector address into cs:ip and make exception specific state
3684 * adjustments.
3685 */
3686 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3687 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3688 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3689 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3690 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3691 pVCpu->cpum.GstCtx.rip = Idte.off;
3692 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3693 IEMMISC_SET_EFL(pVCpu, fEfl);
3694
3695 /** @todo do we actually do this in real mode? */
3696 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3697 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3698
3699 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3700}
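
/*
 * Frame sketch (illustrative, assuming a 2-byte INT imm8 encoding and the
 * usual real-mode IDTR base of 0): a software INT 21h taken at CS:IP=1234:5678
 * pushes FLAGS, then CS=0x1234, then the return IP 0x567A (IP + cbInstr),
 * loads CS:IP from the IVT entry at linear address 0x21 * 4 = 0x84, and
 * clears IF, TF and AC.
 */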
3701
3702
3703/**
3704 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3705 *
3706 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3707 * @param pSReg Pointer to the segment register.
3708 */
3709IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
3710{
3711 pSReg->Sel = 0;
3712 pSReg->ValidSel = 0;
3713 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3714 {
3715 /* VT-x (Intel 3960x) doesn't change the base and limit, but clears and sets the following attributes */
3716 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3717 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3718 }
3719 else
3720 {
3721 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3722 /** @todo check this on AMD-V */
3723 pSReg->u64Base = 0;
3724 pSReg->u32Limit = 0;
3725 }
3726}
3727
3728
3729/**
3730 * Loads a segment selector during a task switch in V8086 mode.
3731 *
3732 * @param pSReg Pointer to the segment register.
3733 * @param uSel The selector value to load.
3734 */
3735IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3736{
3737 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3738 pSReg->Sel = uSel;
3739 pSReg->ValidSel = uSel;
3740 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3741 pSReg->u64Base = uSel << 4;
3742 pSReg->u32Limit = 0xffff;
3743 pSReg->Attr.u = 0xf3;
3744}
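
/*
 * Worked example (illustrative): uSel=0x1234 yields u64Base = 0x1234 << 4 =
 * 0x12340, u32Limit = 0xffff and Attr.u = 0xf3 (present, DPL=3, accessed
 * read/write data), i.e. plain real-mode style segmentation.
 */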
3745
3746
3747/**
3748 * Loads a NULL data selector into a selector register, both the hidden and
3749 * visible parts, in protected mode.
3750 *
3751 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3752 * @param pSReg Pointer to the segment register.
3753 * @param uRpl The RPL.
3754 */
3755IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3756{
3757 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3758 * data selector in protected mode. */
3759 pSReg->Sel = uRpl;
3760 pSReg->ValidSel = uRpl;
3761 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3762 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3763 {
3764 /* VT-x (Intel 3960x) observed doing something like this. */
3765 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3766 pSReg->u32Limit = UINT32_MAX;
3767 pSReg->u64Base = 0;
3768 }
3769 else
3770 {
3771 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3772 pSReg->u32Limit = 0;
3773 pSReg->u64Base = 0;
3774 }
3775}
3776
3777
3778/**
3779 * Loads a segment selector during a task switch in protected mode.
3780 *
3781 * In this task switch scenario, we would throw \#TS exceptions rather than
3782 * \#GPs.
3783 *
3784 * @returns VBox strict status code.
3785 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3786 * @param pSReg Pointer to the segment register.
3787 * @param uSel The new selector value.
3788 *
3789 * @remarks This does _not_ handle CS or SS.
3790 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3791 */
3792IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3793{
3794 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3795
3796 /* Null data selector. */
3797 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3798 {
3799 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3800 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3801 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3802 return VINF_SUCCESS;
3803 }
3804
3805 /* Fetch the descriptor. */
3806 IEMSELDESC Desc;
3807 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3808 if (rcStrict != VINF_SUCCESS)
3809 {
3810 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3811 VBOXSTRICTRC_VAL(rcStrict)));
3812 return rcStrict;
3813 }
3814
3815 /* Must be a data segment or readable code segment. */
3816 if ( !Desc.Legacy.Gen.u1DescType
3817 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3818 {
3819 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3820 Desc.Legacy.Gen.u4Type));
3821 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3822 }
3823
3824 /* Check privileges for data segments and non-conforming code segments. */
3825 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3826 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3827 {
3828 /* The RPL and the new CPL must be less than or equal to the DPL. */
3829 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3830 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3831 {
3832 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3833 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3834 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3835 }
3836 }
3837
3838 /* Is it there? */
3839 if (!Desc.Legacy.Gen.u1Present)
3840 {
3841 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3842 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3843 }
3844
3845 /* The base and limit. */
3846 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3847 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3848
3849 /*
3850 * Ok, everything checked out fine. Now set the accessed bit before
3851 * committing the result into the registers.
3852 */
3853 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3854 {
3855 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3856 if (rcStrict != VINF_SUCCESS)
3857 return rcStrict;
3858 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3859 }
3860
3861 /* Commit */
3862 pSReg->Sel = uSel;
3863 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3864 pSReg->u32Limit = cbLimit;
3865 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3866 pSReg->ValidSel = uSel;
3867 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3868 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3869 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3870
3871 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3872 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3873 return VINF_SUCCESS;
3874}
3875
3876
3877/**
3878 * Performs a task switch.
3879 *
3880 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3881 * caller is responsible for performing the necessary checks (like DPL, TSS
3882 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3883 * reference for JMP, CALL, IRET.
3884 *
3885 * If the task switch is due to a software interrupt or hardware exception,
3886 * the caller is responsible for validating the TSS selector and descriptor. See
3887 * Intel Instruction reference for INT n.
3888 *
3889 * @returns VBox strict status code.
3890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3891 * @param enmTaskSwitch The cause of the task switch.
3892 * @param uNextEip The EIP effective after the task switch.
3893 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3894 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3895 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3896 * @param SelTSS The TSS selector of the new task.
3897 * @param pNewDescTSS Pointer to the new TSS descriptor.
3898 */
3899IEM_STATIC VBOXSTRICTRC
3900iemTaskSwitch(PVMCPUCC pVCpu,
3901 IEMTASKSWITCH enmTaskSwitch,
3902 uint32_t uNextEip,
3903 uint32_t fFlags,
3904 uint16_t uErr,
3905 uint64_t uCr2,
3906 RTSEL SelTSS,
3907 PIEMSELDESC pNewDescTSS)
3908{
3909 Assert(!IEM_IS_REAL_MODE(pVCpu));
3910 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3911 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3912
3913 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3914 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3915 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3916 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3917 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3918
3919 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3920 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3921
3922 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3923 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
3924
3925 /* Update CR2 in case it's a page-fault. */
3926 /** @todo This should probably be done much earlier in IEM/PGM. See
3927 * @bugref{5653#c49}. */
3928 if (fFlags & IEM_XCPT_FLAGS_CR2)
3929 pVCpu->cpum.GstCtx.cr2 = uCr2;
3930
3931 /*
3932 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3933 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3934 */
3935 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3936 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3937 if (uNewTSSLimit < uNewTSSLimitMin)
3938 {
3939 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3940 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3941 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3942 }
3943
3944 /*
3945 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
3946 * The new TSS must have been read and validated (DPL, limits etc.) before a
3947 * task-switch VM-exit commences.
3948 *
3949 * See Intel spec. 25.4.2 "Treatment of Task Switches"
3950 */
3951 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3952 {
3953 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
3954 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
3955 }
3956
3957 /*
3958 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
3959 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
3960 */
3961 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
3962 {
3963 uint32_t const uExitInfo1 = SelTSS;
3964 uint32_t uExitInfo2 = uErr;
3965 switch (enmTaskSwitch)
3966 {
3967 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
3968 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
3969 default: break;
3970 }
3971 if (fFlags & IEM_XCPT_FLAGS_ERR)
3972 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
3973 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
3974 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
3975
3976 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
3977 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
3978 RT_NOREF2(uExitInfo1, uExitInfo2);
3979 }
3980
3981 /*
3982 * Check the current TSS limit. The last write to the current TSS during the
3983 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3984 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3985 *
3986 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3987 * end up with smaller than "legal" TSS limits.
3988 */
3989 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
3990 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3991 if (uCurTSSLimit < uCurTSSLimitMin)
3992 {
3993 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3994 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3995 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3996 }
3997
3998 /*
3999 * Verify that the new TSS can be accessed and map it. Map only the required contents
4000 * and not the entire TSS.
4001 */
4002 void *pvNewTSS;
4003 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4004 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4005 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4006 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4007 * not perform correct translation if this happens. See Intel spec. 7.2.1
4008 * "Task-State Segment" */
4009 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4010 if (rcStrict != VINF_SUCCESS)
4011 {
4012 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4013 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4014 return rcStrict;
4015 }
4016
4017 /*
4018 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4019 */
4020 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4021 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4022 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4023 {
4024 PX86DESC pDescCurTSS;
4025 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4026 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4027 if (rcStrict != VINF_SUCCESS)
4028 {
4029 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4030 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4031 return rcStrict;
4032 }
4033
4034 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4035 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4036 if (rcStrict != VINF_SUCCESS)
4037 {
4038 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4039 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4040 return rcStrict;
4041 }
4042
4043 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4044 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4045 {
4046 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4047 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4048 u32EFlags &= ~X86_EFL_NT;
4049 }
4050 }
4051
4052 /*
4053 * Save the CPU state into the current TSS.
4054 */
4055 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4056 if (GCPtrNewTSS == GCPtrCurTSS)
4057 {
4058 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4059 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4060 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4061 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4062 pVCpu->cpum.GstCtx.ldtr.Sel));
4063 }
4064 if (fIsNewTSS386)
4065 {
4066 /*
4067 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4068 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4069 */
4070 void *pvCurTSS32;
4071 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4072 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4073 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4074 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4075 if (rcStrict != VINF_SUCCESS)
4076 {
4077 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4078 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4079 return rcStrict;
4080 }
4081
4082 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. interval [offCurTSS..offCurTSS + cbCurTSS). */
4083 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4084 pCurTSS32->eip = uNextEip;
4085 pCurTSS32->eflags = u32EFlags;
4086 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4087 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4088 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4089 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4090 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4091 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4092 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4093 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4094 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4095 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4096 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4097 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4098 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4099 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4100
4101 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4102 if (rcStrict != VINF_SUCCESS)
4103 {
4104 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4105 VBOXSTRICTRC_VAL(rcStrict)));
4106 return rcStrict;
4107 }
4108 }
4109 else
4110 {
4111 /*
4112 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4113 */
4114 void *pvCurTSS16;
4115 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4116 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4117 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4118 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4119 if (rcStrict != VINF_SUCCESS)
4120 {
4121 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4122 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4123 return rcStrict;
4124 }
4125
4126 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. interval [offCurTSS..offCurTSS + cbCurTSS). */
4127 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4128 pCurTSS16->ip = uNextEip;
4129 pCurTSS16->flags = u32EFlags;
4130 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4131 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4132 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4133 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4134 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4135 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4136 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4137 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4138 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4139 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4140 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4141 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4142
4143 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4144 if (rcStrict != VINF_SUCCESS)
4145 {
4146 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4147 VBOXSTRICTRC_VAL(rcStrict)));
4148 return rcStrict;
4149 }
4150 }
4151
4152 /*
4153 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4154 */
4155 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4156 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4157 {
4158 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4159 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4160 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4161 }
4162
4163 /*
4164 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
4165 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4166 */
4167 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4168 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4169 bool fNewDebugTrap;
4170 if (fIsNewTSS386)
4171 {
4172 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4173 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4174 uNewEip = pNewTSS32->eip;
4175 uNewEflags = pNewTSS32->eflags;
4176 uNewEax = pNewTSS32->eax;
4177 uNewEcx = pNewTSS32->ecx;
4178 uNewEdx = pNewTSS32->edx;
4179 uNewEbx = pNewTSS32->ebx;
4180 uNewEsp = pNewTSS32->esp;
4181 uNewEbp = pNewTSS32->ebp;
4182 uNewEsi = pNewTSS32->esi;
4183 uNewEdi = pNewTSS32->edi;
4184 uNewES = pNewTSS32->es;
4185 uNewCS = pNewTSS32->cs;
4186 uNewSS = pNewTSS32->ss;
4187 uNewDS = pNewTSS32->ds;
4188 uNewFS = pNewTSS32->fs;
4189 uNewGS = pNewTSS32->gs;
4190 uNewLdt = pNewTSS32->selLdt;
4191 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4192 }
4193 else
4194 {
4195 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4196 uNewCr3 = 0;
4197 uNewEip = pNewTSS16->ip;
4198 uNewEflags = pNewTSS16->flags;
4199 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4200 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4201 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4202 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4203 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4204 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4205 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4206 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4207 uNewES = pNewTSS16->es;
4208 uNewCS = pNewTSS16->cs;
4209 uNewSS = pNewTSS16->ss;
4210 uNewDS = pNewTSS16->ds;
4211 uNewFS = 0;
4212 uNewGS = 0;
4213 uNewLdt = pNewTSS16->selLdt;
4214 fNewDebugTrap = false;
4215 }
4216
4217 if (GCPtrNewTSS == GCPtrCurTSS)
4218 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4219 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4220
4221 /*
4222 * We're done accessing the new TSS.
4223 */
4224 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4225 if (rcStrict != VINF_SUCCESS)
4226 {
4227 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4228 return rcStrict;
4229 }
4230
4231 /*
4232 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4233 */
4234 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4235 {
4236 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4237 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4238 if (rcStrict != VINF_SUCCESS)
4239 {
4240 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4241 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4242 return rcStrict;
4243 }
4244
4245 /* Check that the descriptor indicates the new TSS is available (not busy). */
4246 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4247 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4248 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4249
4250 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4251 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4252 if (rcStrict != VINF_SUCCESS)
4253 {
4254 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4255 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4256 return rcStrict;
4257 }
4258 }
4259
4260 /*
4261 * From this point on, we're technically in the new task. We will defer exceptions until
4262 * the task switch completes, delivering them before any instruction in the new task executes.
4263 */
4264 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4265 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4266 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4267 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4268 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4269 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4270 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4271
4272 /* Set the busy bit in TR. */
4273 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4274 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4275 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4276 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4277 {
4278 uNewEflags |= X86_EFL_NT;
4279 }
4280
4281 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4282 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4283 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4284
4285 pVCpu->cpum.GstCtx.eip = uNewEip;
4286 pVCpu->cpum.GstCtx.eax = uNewEax;
4287 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4288 pVCpu->cpum.GstCtx.edx = uNewEdx;
4289 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4290 pVCpu->cpum.GstCtx.esp = uNewEsp;
4291 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4292 pVCpu->cpum.GstCtx.esi = uNewEsi;
4293 pVCpu->cpum.GstCtx.edi = uNewEdi;
4294
4295 uNewEflags &= X86_EFL_LIVE_MASK;
4296 uNewEflags |= X86_EFL_RA1_MASK;
4297 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4298
4299 /*
4300 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4301 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3,
4302 * since the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
4303 */
4304 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4305 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4306
4307 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4308 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4309
4310 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4311 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4312
4313 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4314 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4315
4316 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4317 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4318
4319 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4320 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4321 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4322
4323 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4324 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4325 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4326 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4327
4328 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4329 {
4330 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4331 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4332 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4333 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4334 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4335 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4336 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4337 }
4338
4339 /*
4340 * Switch CR3 for the new task.
4341 */
4342 if ( fIsNewTSS386
4343 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4344 {
4345 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4346 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4347 AssertRCSuccessReturn(rc, rc);
4348
4349 /* Inform PGM. */
4350 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4351 AssertRCReturn(rc, rc);
4352 /* ignore informational status codes */
4353
4354 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4355 }
4356
4357 /*
4358 * Switch LDTR for the new task.
4359 */
4360 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4361 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4362 else
4363 {
4364 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4365
4366 IEMSELDESC DescNewLdt;
4367 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4368 if (rcStrict != VINF_SUCCESS)
4369 {
4370 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4371 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4372 return rcStrict;
4373 }
4374 if ( !DescNewLdt.Legacy.Gen.u1Present
4375 || DescNewLdt.Legacy.Gen.u1DescType
4376 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4377 {
4378 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4379 uNewLdt, DescNewLdt.Legacy.u));
4380 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4381 }
4382
4383 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4384 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4385 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4386 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4387 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4388 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4389 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4390 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4391 }
4392
4393 IEMSELDESC DescSS;
4394 if (IEM_IS_V86_MODE(pVCpu))
4395 {
4396 pVCpu->iem.s.uCpl = 3;
4397 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4398 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4399 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4400 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4401 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4402 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4403
4404 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4405 DescSS.Legacy.u = 0;
4406 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4407 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4408 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4409 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4410 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4411 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4412 DescSS.Legacy.Gen.u2Dpl = 3;
4413 }
4414 else
4415 {
4416 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4417
4418 /*
4419 * Load the stack segment for the new task.
4420 */
4421 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4422 {
4423 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4424 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4425 }
4426
4427 /* Fetch the descriptor. */
4428 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4429 if (rcStrict != VINF_SUCCESS)
4430 {
4431 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4432 VBOXSTRICTRC_VAL(rcStrict)));
4433 return rcStrict;
4434 }
4435
4436 /* SS must be a data segment and writable. */
4437 if ( !DescSS.Legacy.Gen.u1DescType
4438 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4439 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4440 {
4441 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4442 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4443 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4444 }
4445
4446 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4447 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4448 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4449 {
4450 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4451 uNewCpl));
4452 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4453 }
4454
4455 /* Is it there? */
4456 if (!DescSS.Legacy.Gen.u1Present)
4457 {
4458 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4459 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4460 }
4461
4462 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4463 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4464
4465 /* Set the accessed bit before committing the result into SS. */
4466 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4467 {
4468 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4469 if (rcStrict != VINF_SUCCESS)
4470 return rcStrict;
4471 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4472 }
4473
4474 /* Commit SS. */
4475 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4476 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4477 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4478 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4479 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4480 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4481 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4482
4483 /* CPL has changed, update IEM before loading rest of segments. */
4484 pVCpu->iem.s.uCpl = uNewCpl;
4485
4486 /*
4487 * Load the data segments for the new task.
4488 */
4489 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4490 if (rcStrict != VINF_SUCCESS)
4491 return rcStrict;
4492 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4493 if (rcStrict != VINF_SUCCESS)
4494 return rcStrict;
4495 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4496 if (rcStrict != VINF_SUCCESS)
4497 return rcStrict;
4498 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4499 if (rcStrict != VINF_SUCCESS)
4500 return rcStrict;
4501
4502 /*
4503 * Load the code segment for the new task.
4504 */
4505 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4506 {
4507 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4508 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4509 }
4510
4511 /* Fetch the descriptor. */
4512 IEMSELDESC DescCS;
4513 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4514 if (rcStrict != VINF_SUCCESS)
4515 {
4516 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4517 return rcStrict;
4518 }
4519
4520 /* CS must be a code segment. */
4521 if ( !DescCS.Legacy.Gen.u1DescType
4522 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4523 {
4524 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4525 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4526 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4527 }
4528
4529 /* For conforming CS, DPL must be less than or equal to the RPL. */
4530 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4531 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4532 {
4533 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4534 DescCS.Legacy.Gen.u2Dpl));
4535 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4536 }
4537
4538 /* For non-conforming CS, DPL must match RPL. */
4539 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4540 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4541 {
4542 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4543 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4544 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4545 }
4546
4547 /* Is it there? */
4548 if (!DescCS.Legacy.Gen.u1Present)
4549 {
4550 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4551 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4552 }
4553
4554 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4555 u64Base = X86DESC_BASE(&DescCS.Legacy);
4556
4557 /* Set the accessed bit before committing the result into CS. */
4558 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4559 {
4560 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4561 if (rcStrict != VINF_SUCCESS)
4562 return rcStrict;
4563 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4564 }
4565
4566 /* Commit CS. */
4567 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4568 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4569 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4570 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4571 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4572 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4573 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4574 }
4575
4576 /** @todo Debug trap. */
4577 if (fIsNewTSS386 && fNewDebugTrap)
4578 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4579
4580 /*
4581 * Construct the error code masks based on what caused this task switch.
4582 * See Intel Instruction reference for INT.
4583 */
4584 uint16_t uExt;
4585 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4586 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4587 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
4588 {
4589 uExt = 1;
4590 }
4591 else
4592 uExt = 0;
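    /* uExt becomes bit 0 (EXT) of any error code raised below: set when the event
       was generated externally (hardware interrupt, CPU exception, INT1/ICEBP),
       clear for the INT n, INT3 and INTO instructions. */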
4593
4594 /*
4595 * Push any error code on to the new stack.
4596 */
4597 if (fFlags & IEM_XCPT_FLAGS_ERR)
4598 {
4599 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4600 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4601 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
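        /* Only the error code itself is pushed here: a dword on the stack of a
           32-bit TSS, a word on that of a 16-bit TSS (see the pushes below). */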
4602
4603 /* Check that there is sufficient space on the stack. */
4604 /** @todo Factor out segment limit checking for normal/expand down segments
4605 * into a separate function. */
4606 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4607 {
4608 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4609 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4610 {
4611 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4612 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4613 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4614 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4615 }
4616 }
4617 else
4618 {
4619 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4620 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4621 {
4622 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4623 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4624 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4625 }
4626 }
4627
4628
4629 if (fIsNewTSS386)
4630 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4631 else
4632 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4633 if (rcStrict != VINF_SUCCESS)
4634 {
4635 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4636 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4637 return rcStrict;
4638 }
4639 }
4640
4641 /* Check the new EIP against the new CS limit. */
4642 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4643 {
4644         Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4645 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4646 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4647 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4648 }
4649
4650 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4651 pVCpu->cpum.GstCtx.ss.Sel));
4652 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4653}
4654
4655
4656/**
4657 * Implements exceptions and interrupts for protected mode.
4658 *
4659 * @returns VBox strict status code.
4660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4661 * @param cbInstr The number of bytes to offset rIP by in the return
4662 * address.
4663 * @param u8Vector The interrupt / exception vector number.
4664 * @param fFlags The flags.
4665 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4666 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4667 */
4668IEM_STATIC VBOXSTRICTRC
4669iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
4670 uint8_t cbInstr,
4671 uint8_t u8Vector,
4672 uint32_t fFlags,
4673 uint16_t uErr,
4674 uint64_t uCr2)
4675{
4676 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4677
4678 /*
4679 * Read the IDT entry.
4680 */
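    /* Faults that implicate an IDT entry push an error code in IDT format:
       bit 0 = EXT, bit 1 = IDT, bits 3..15 = vector.  E.g. a bad entry for
       vector 0x0e yields (0x0e << 3) | 2 = 0x72. */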
4681 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4682 {
4683 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4684 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4685 }
4686 X86DESC Idte;
4687 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4688 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4689 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4690 {
4691 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4692 return rcStrict;
4693 }
4694 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4695 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4696 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4697
4698 /*
4699 * Check the descriptor type, DPL and such.
4700 * ASSUMES this is done in the same order as described for call-gate calls.
4701 */
4702 if (Idte.Gate.u1DescType)
4703 {
4704 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4705 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4706 }
4707 bool fTaskGate = false;
4708 uint8_t f32BitGate = true;
4709 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4710 switch (Idte.Gate.u4Type)
4711 {
4712 case X86_SEL_TYPE_SYS_UNDEFINED:
4713 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4714 case X86_SEL_TYPE_SYS_LDT:
4715 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4716 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4717 case X86_SEL_TYPE_SYS_UNDEFINED2:
4718 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4719 case X86_SEL_TYPE_SYS_UNDEFINED3:
4720 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4721 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4722 case X86_SEL_TYPE_SYS_UNDEFINED4:
4723 {
4724 /** @todo check what actually happens when the type is wrong...
4725 * esp. call gates. */
4726 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4727 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4728 }
4729
4730 case X86_SEL_TYPE_SYS_286_INT_GATE:
4731 f32BitGate = false;
4732 RT_FALL_THRU();
4733 case X86_SEL_TYPE_SYS_386_INT_GATE:
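            /* Interrupt gates (286 and 386 alike) also mask IF so the handler
               starts with interrupts disabled; the trap gates below leave IF
               untouched. */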
4734 fEflToClear |= X86_EFL_IF;
4735 break;
4736
4737 case X86_SEL_TYPE_SYS_TASK_GATE:
4738 fTaskGate = true;
4739#ifndef IEM_IMPLEMENTS_TASKSWITCH
4740 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4741#endif
4742 break;
4743
4744 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4745             f32BitGate = false;
                 RT_FALL_THRU();
4746 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4747 break;
4748
4749 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4750 }
4751
4752 /* Check DPL against CPL if applicable. */
4753 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
4754 {
4755 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4756 {
4757 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4758 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4759 }
4760 }
4761
4762 /* Is it there? */
4763 if (!Idte.Gate.u1Present)
4764 {
4765 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4766 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4767 }
4768
4769 /* Is it a task-gate? */
4770 if (fTaskGate)
4771 {
4772 /*
4773 * Construct the error code masks based on what caused this task switch.
4774 * See Intel Instruction reference for INT.
4775 */
4776 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4777 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
4778 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4779 RTSEL SelTSS = Idte.Gate.u16Sel;
4780
4781 /*
4782 * Fetch the TSS descriptor in the GDT.
4783 */
4784 IEMSELDESC DescTSS;
4785 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4786 if (rcStrict != VINF_SUCCESS)
4787 {
4788 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4789 VBOXSTRICTRC_VAL(rcStrict)));
4790 return rcStrict;
4791 }
4792
4793 /* The TSS descriptor must be a system segment and be available (not busy). */
4794 if ( DescTSS.Legacy.Gen.u1DescType
4795 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4796 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4797 {
4798 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4799 u8Vector, SelTSS, DescTSS.Legacy.au64));
4800 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4801 }
4802
4803 /* The TSS must be present. */
4804 if (!DescTSS.Legacy.Gen.u1Present)
4805 {
4806 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4807 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4808 }
4809
4810 /* Do the actual task switch. */
4811 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4812 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4813 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4814 }
4815
4816 /* A null CS is bad. */
4817 RTSEL NewCS = Idte.Gate.u16Sel;
4818 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4819 {
4820 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4821 return iemRaiseGeneralProtectionFault0(pVCpu);
4822 }
4823
4824 /* Fetch the descriptor for the new CS. */
4825 IEMSELDESC DescCS;
4826 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4827 if (rcStrict != VINF_SUCCESS)
4828 {
4829 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4830 return rcStrict;
4831 }
4832
4833 /* Must be a code segment. */
4834 if (!DescCS.Legacy.Gen.u1DescType)
4835 {
4836 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4837 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4838 }
4839 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4840 {
4841 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4842 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4843 }
4844
4845 /* Don't allow lowering the privilege level. */
4846 /** @todo Does the lowering of privileges apply to software interrupts
4847 * only? This has bearings on the more-privileged or
4848 * same-privilege stack behavior further down. A testcase would
4849 * be nice. */
4850 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4851 {
4852 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4853 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4854 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4855 }
4856
4857 /* Make sure the selector is present. */
4858 if (!DescCS.Legacy.Gen.u1Present)
4859 {
4860 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4861 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4862 }
4863
4864 /* Check the new EIP against the new CS limit. */
4865 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4866 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4867 ? Idte.Gate.u16OffsetLow
4868 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
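    /* (286 gates only carry a 16-bit offset; the high word of the descriptor
       must not leak into the new EIP.) */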
4869 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4870 if (uNewEip > cbLimitCS)
4871 {
4872 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4873 u8Vector, uNewEip, cbLimitCS, NewCS));
4874 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4875 }
4876 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4877
4878 /* Calc the flag image to push. */
4879 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4880 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4881 fEfl &= ~X86_EFL_RF;
4882 else
4883 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4884
4885 /* From V8086 mode only go to CPL 0. */
4886 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4887 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4888 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4889 {
4890 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4891 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4892 }
4893
4894 /*
4895 * If the privilege level changes, we need to get a new stack from the TSS.
4896 * This in turns means validating the new SS and ESP...
4897 */
4898 if (uNewCpl != pVCpu->iem.s.uCpl)
4899 {
4900 RTSEL NewSS;
4901 uint32_t uNewEsp;
4902 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
4903 if (rcStrict != VINF_SUCCESS)
4904 return rcStrict;
4905
4906 IEMSELDESC DescSS;
4907 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
4908 if (rcStrict != VINF_SUCCESS)
4909 return rcStrict;
4910 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4911 if (!DescSS.Legacy.Gen.u1DefBig)
4912 {
4913 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4914 uNewEsp = (uint16_t)uNewEsp;
4915 }
4916
4917 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4918
4919 /* Check that there is sufficient space for the stack frame. */
4920 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
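        /* Frame sizes in bytes, with f32BitGate doubling the 16-bit gate figures:
              protected mode:   [err] EIP CS EFLAGS ESP SS        -> 10/12 or 20/24
              coming from V86:  the above plus ES DS FS GS        -> 18/20 or 36/40 */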
4921 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4922 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4923 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4924
4925 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4926 {
4927 if ( uNewEsp - 1 > cbLimitSS
4928 || uNewEsp < cbStackFrame)
4929 {
4930 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4931 u8Vector, NewSS, uNewEsp, cbStackFrame));
4932 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4933 }
4934 }
4935 else
4936 {
4937 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4938 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4939 {
4940 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4941 u8Vector, NewSS, uNewEsp, cbStackFrame));
4942 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4943 }
4944 }
4945
4946 /*
4947 * Start making changes.
4948 */
4949
4950 /* Set the new CPL so that stack accesses use it. */
4951 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4952 pVCpu->iem.s.uCpl = uNewCpl;
4953
4954 /* Create the stack frame. */
4955 RTPTRUNION uStackFrame;
4956 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4957 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4958 if (rcStrict != VINF_SUCCESS)
4959 return rcStrict;
4960 void * const pvStackFrame = uStackFrame.pv;
4961 if (f32BitGate)
4962 {
4963 if (fFlags & IEM_XCPT_FLAGS_ERR)
4964 *uStackFrame.pu32++ = uErr;
4965 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
4966 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4967 uStackFrame.pu32[2] = fEfl;
4968 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
4969 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
4970 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4971 if (fEfl & X86_EFL_VM)
4972 {
4973 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
4974 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
4975 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
4976 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
4977 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
4978 }
4979 }
4980 else
4981 {
4982 if (fFlags & IEM_XCPT_FLAGS_ERR)
4983 *uStackFrame.pu16++ = uErr;
4984 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
4985 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4986 uStackFrame.pu16[2] = fEfl;
4987 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
4988 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
4989 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
4990 if (fEfl & X86_EFL_VM)
4991 {
4992 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
4993 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
4994 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
4995 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
4996 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
4997 }
4998 }
4999 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5000 if (rcStrict != VINF_SUCCESS)
5001 return rcStrict;
5002
5003 /* Mark the selectors 'accessed' (hope this is the correct time). */
5004     /** @todo testcase: exactly _when_ are the accessed bits set - before or
5005 * after pushing the stack frame? (Write protect the gdt + stack to
5006 * find out.) */
5007 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5008 {
5009 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5010 if (rcStrict != VINF_SUCCESS)
5011 return rcStrict;
5012 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5013 }
5014
5015 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5016 {
5017 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5018 if (rcStrict != VINF_SUCCESS)
5019 return rcStrict;
5020 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5021 }
5022
5023 /*
5024      * Start committing the register changes (joins with the DPL=CPL branch).
5025 */
5026 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5027 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5028 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5029 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5030 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5031 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5032 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5033 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5034 * SP is loaded).
5035 * Need to check the other combinations too:
5036 * - 16-bit TSS, 32-bit handler
5037 * - 32-bit TSS, 16-bit handler */
5038 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5039 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5040 else
5041 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5042
5043 if (fEfl & X86_EFL_VM)
5044 {
5045 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5046 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5047 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5048 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5049 }
5050 }
5051 /*
5052 * Same privilege, no stack change and smaller stack frame.
5053 */
5054 else
5055 {
5056 uint64_t uNewRsp;
5057 RTPTRUNION uStackFrame;
5058 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
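        /* The same-privilege frame is just [err] EIP CS EFLAGS: 6 or 8 bytes
           thru a 16-bit gate, 12 or 16 bytes thru a 32-bit one. */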
5059 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5060 if (rcStrict != VINF_SUCCESS)
5061 return rcStrict;
5062 void * const pvStackFrame = uStackFrame.pv;
5063
5064 if (f32BitGate)
5065 {
5066 if (fFlags & IEM_XCPT_FLAGS_ERR)
5067 *uStackFrame.pu32++ = uErr;
5068 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5069 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5070 uStackFrame.pu32[2] = fEfl;
5071 }
5072 else
5073 {
5074 if (fFlags & IEM_XCPT_FLAGS_ERR)
5075 *uStackFrame.pu16++ = uErr;
5076 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5077 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5078 uStackFrame.pu16[2] = fEfl;
5079 }
5080 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5081 if (rcStrict != VINF_SUCCESS)
5082 return rcStrict;
5083
5084 /* Mark the CS selector as 'accessed'. */
5085 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5086 {
5087 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5088 if (rcStrict != VINF_SUCCESS)
5089 return rcStrict;
5090 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5091 }
5092
5093 /*
5094 * Start committing the register changes (joins with the other branch).
5095 */
5096 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5097 }
5098
5099 /* ... register committing continues. */
5100 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5101 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5102 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5103 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5104 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5105 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5106
5107 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5108 fEfl &= ~fEflToClear;
5109 IEMMISC_SET_EFL(pVCpu, fEfl);
5110
5111 if (fFlags & IEM_XCPT_FLAGS_CR2)
5112 pVCpu->cpum.GstCtx.cr2 = uCr2;
5113
5114 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5115 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5116
5117 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5118}
5119
5120
5121/**
5122 * Implements exceptions and interrupts for long mode.
5123 *
5124 * @returns VBox strict status code.
5125 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5126 * @param cbInstr The number of bytes to offset rIP by in the return
5127 * address.
5128 * @param u8Vector The interrupt / exception vector number.
5129 * @param fFlags The flags.
5130 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5131 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5132 */
5133IEM_STATIC VBOXSTRICTRC
5134iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
5135 uint8_t cbInstr,
5136 uint8_t u8Vector,
5137 uint32_t fFlags,
5138 uint16_t uErr,
5139 uint64_t uCr2)
5140{
5141 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5142
5143 /*
5144 * Read the IDT entry.
5145 */
5146 uint16_t offIdt = (uint16_t)u8Vector << 4;
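    /* Long mode IDT entries are 16 bytes each, hence vector * 16; the descriptor
       is fetched below as two 8-byte reads. */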
5147 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5148 {
5149 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5150 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5151 }
5152 X86DESC64 Idte;
5153 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5154 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5155 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5156 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5157 {
5158 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5159 return rcStrict;
5160 }
5161 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5162 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5163 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5164
5165 /*
5166 * Check the descriptor type, DPL and such.
5167 * ASSUMES this is done in the same order as described for call-gate calls.
5168 */
5169 if (Idte.Gate.u1DescType)
5170 {
5171 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5172 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5173 }
5174 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5175 switch (Idte.Gate.u4Type)
5176 {
5177 case AMD64_SEL_TYPE_SYS_INT_GATE:
5178 fEflToClear |= X86_EFL_IF;
5179 break;
5180 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5181 break;
5182
5183 default:
5184 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5185 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5186 }
5187
5188 /* Check DPL against CPL if applicable. */
5189 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
5190 {
5191 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5192 {
5193 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5194 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5195 }
5196 }
5197
5198 /* Is it there? */
5199 if (!Idte.Gate.u1Present)
5200 {
5201 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5202 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5203 }
5204
5205 /* A null CS is bad. */
5206 RTSEL NewCS = Idte.Gate.u16Sel;
5207 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5208 {
5209 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5210 return iemRaiseGeneralProtectionFault0(pVCpu);
5211 }
5212
5213 /* Fetch the descriptor for the new CS. */
5214 IEMSELDESC DescCS;
5215 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5216 if (rcStrict != VINF_SUCCESS)
5217 {
5218 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5219 return rcStrict;
5220 }
5221
5222 /* Must be a 64-bit code segment. */
5223 if (!DescCS.Long.Gen.u1DescType)
5224 {
5225 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5226 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5227 }
5228 if ( !DescCS.Long.Gen.u1Long
5229 || DescCS.Long.Gen.u1DefBig
5230 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5231 {
5232 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5233 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5234 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5235 }
5236
5237 /* Don't allow lowering the privilege level. For non-conforming CS
5238 selectors, the CS.DPL sets the privilege level the trap/interrupt
5239 handler runs at. For conforming CS selectors, the CPL remains
5240 unchanged, but the CS.DPL must be <= CPL. */
5241 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5242 * when CPU in Ring-0. Result \#GP? */
5243 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5244 {
5245 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5246 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5247 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5248 }
5249
5250
5251 /* Make sure the selector is present. */
5252 if (!DescCS.Legacy.Gen.u1Present)
5253 {
5254 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5255 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5256 }
5257
5258 /* Check that the new RIP is canonical. */
5259 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5260 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5261 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5262 if (!IEM_IS_CANONICAL(uNewRip))
5263 {
5264 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5265 return iemRaiseGeneralProtectionFault0(pVCpu);
5266 }
5267
5268 /*
5269 * If the privilege level changes or if the IST isn't zero, we need to get
5270 * a new stack from the TSS.
5271 */
5272 uint64_t uNewRsp;
5273 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5274 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5275 if ( uNewCpl != pVCpu->iem.s.uCpl
5276 || Idte.Gate.u3IST != 0)
5277 {
5278 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5279 if (rcStrict != VINF_SUCCESS)
5280 return rcStrict;
5281 }
5282 else
5283 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5284 uNewRsp &= ~(uint64_t)0xf;
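    /* In 64-bit mode the CPU aligns the stack pointer down to a 16-byte boundary
       before pushing the frame, whether or not the stack was switched. */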
5285
5286 /*
5287 * Calc the flag image to push.
5288 */
5289 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5290 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5291 fEfl &= ~X86_EFL_RF;
5292 else
5293 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5294
5295 /*
5296 * Start making changes.
5297 */
5298 /* Set the new CPL so that stack accesses use it. */
5299 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5300 pVCpu->iem.s.uCpl = uNewCpl;
5301
5302 /* Create the stack frame. */
5303 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
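    /* The 64-bit frame is always SS:RSP, RFLAGS and CS:RIP (5 qwords, 40 bytes),
       plus one extra qword when an error code is pushed (48 bytes). */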
5304 RTPTRUNION uStackFrame;
5305 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5306 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5307 if (rcStrict != VINF_SUCCESS)
5308 return rcStrict;
5309 void * const pvStackFrame = uStackFrame.pv;
5310
5311 if (fFlags & IEM_XCPT_FLAGS_ERR)
5312 *uStackFrame.pu64++ = uErr;
5313 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5314 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5315 uStackFrame.pu64[2] = fEfl;
5316 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5317 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5318 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5319 if (rcStrict != VINF_SUCCESS)
5320 return rcStrict;
5321
5322     /* Mark the CS selector as 'accessed' (hope this is the correct time). */
5323     /** @todo testcase: exactly _when_ are the accessed bits set - before or
5324 * after pushing the stack frame? (Write protect the gdt + stack to
5325 * find out.) */
5326 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5327 {
5328 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5329 if (rcStrict != VINF_SUCCESS)
5330 return rcStrict;
5331 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5332 }
5333
5334 /*
5335      * Start committing the register changes.
5336 */
5337 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5338 * hidden registers when interrupting 32-bit or 16-bit code! */
5339 if (uNewCpl != uOldCpl)
5340 {
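        /* A privilege change in long mode loads SS with a NULL selector whose
           RPL equals the new CPL; base and limit are ignored in 64-bit mode, so
           the hidden parts are merely set to something sensible. */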
5341 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5342 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5343 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5344 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5345 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5346 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5347 }
5348 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5349 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5350 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5351 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5352 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5353 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5354 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5355 pVCpu->cpum.GstCtx.rip = uNewRip;
5356
5357 fEfl &= ~fEflToClear;
5358 IEMMISC_SET_EFL(pVCpu, fEfl);
5359
5360 if (fFlags & IEM_XCPT_FLAGS_CR2)
5361 pVCpu->cpum.GstCtx.cr2 = uCr2;
5362
5363 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5364 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5365
5366 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5367}
5368
5369
5370/**
5371 * Implements exceptions and interrupts.
5372 *
5373  * All exceptions and interrupts go through this function!
5374 *
5375 * @returns VBox strict status code.
5376 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5377 * @param cbInstr The number of bytes to offset rIP by in the return
5378 * address.
5379 * @param u8Vector The interrupt / exception vector number.
5380 * @param fFlags The flags.
5381 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5382 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5383 */
5384DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5385iemRaiseXcptOrInt(PVMCPUCC pVCpu,
5386 uint8_t cbInstr,
5387 uint8_t u8Vector,
5388 uint32_t fFlags,
5389 uint16_t uErr,
5390 uint64_t uCr2)
5391{
5392 /*
5393 * Get all the state that we might need here.
5394 */
5395 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5396 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5397
5398#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5399 /*
5400 * Flush prefetch buffer
5401 */
5402 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5403#endif
5404
5405 /*
5406 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5407 */
5408 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5409 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5410 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
5411 | IEM_XCPT_FLAGS_BP_INSTR
5412 | IEM_XCPT_FLAGS_ICEBP_INSTR
5413 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5414 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5415 {
5416 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5417 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5418 u8Vector = X86_XCPT_GP;
5419 uErr = 0;
5420 }
5421#ifdef DBGFTRACE_ENABLED
5422 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5423 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5424 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5425#endif
5426
5427 /*
5428 * Evaluate whether NMI blocking should be in effect.
5429 * Normally, NMI blocking is in effect whenever we inject an NMI.
5430 */
5431 bool fBlockNmi;
5432 if ( u8Vector == X86_XCPT_NMI
5433 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
5434 fBlockNmi = true;
5435 else
5436 fBlockNmi = false;
5437
5438#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5439 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5440 {
5441 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5442 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5443 return rcStrict0;
5444
5445 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
5446 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
5447 {
5448 Assert(CPUMIsGuestVmxPinCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
5449 fBlockNmi = false;
5450 }
5451 }
5452#endif
5453
5454#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5455 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5456 {
5457 /*
5458 * If the event is being injected as part of VMRUN, it isn't subject to event
5459 * intercepts in the nested-guest. However, secondary exceptions that occur
5460 * during injection of any event -are- subject to exception intercepts.
5461 *
5462 * See AMD spec. 15.20 "Event Injection".
5463 */
5464 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5465 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5466 else
5467 {
5468 /*
5469 * Check and handle if the event being raised is intercepted.
5470 */
5471 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5472 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5473 return rcStrict0;
5474 }
5475 }
5476#endif
5477
5478 /*
5479 * Set NMI blocking if necessary.
5480 */
5481 if ( fBlockNmi
5482 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
5483 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5484
5485 /*
5486 * Do recursion accounting.
5487 */
5488 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5489 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5490 if (pVCpu->iem.s.cXcptRecursions == 0)
5491 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5492 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5493 else
5494 {
5495 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5496 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5497 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5498
5499 if (pVCpu->iem.s.cXcptRecursions >= 4)
5500 {
5501#ifdef DEBUG_bird
5502 AssertFailed();
5503#endif
5504 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5505 }
5506
5507 /*
5508 * Evaluate the sequence of recurring events.
5509 */
5510 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5511 NULL /* pXcptRaiseInfo */);
5512 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5513 { /* likely */ }
5514 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5515 {
5516 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5517 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5518 u8Vector = X86_XCPT_DF;
5519 uErr = 0;
5520#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5521 /* VMX nested-guest #DF intercept needs to be checked here. */
5522 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5523 {
5524 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
5525 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5526 return rcStrict0;
5527 }
5528#endif
5529 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5530 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5531 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5532 }
5533 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5534 {
5535 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5536 return iemInitiateCpuShutdown(pVCpu);
5537 }
5538 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5539 {
5540 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5541 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5542 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5543 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5544 return VERR_EM_GUEST_CPU_HANG;
5545 }
5546 else
5547 {
5548 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5549 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5550 return VERR_IEM_IPE_9;
5551 }
5552
5553 /*
5554          * The 'EXT' bit is set when an exception occurs during delivery of an external
5555          * event (such as an interrupt or an earlier exception)[1]. The privileged software
5556          * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by the
5557          * software interrupt instructions INT n, INT3 and INTO, the 'EXT' bit is not set[3].
5558 *
5559 * [1] - Intel spec. 6.13 "Error Code"
5560 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5561 * [3] - Intel Instruction reference for INT n.
5562 */
5563 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5564 && (fFlags & IEM_XCPT_FLAGS_ERR)
5565 && u8Vector != X86_XCPT_PF
5566 && u8Vector != X86_XCPT_DF)
5567 {
5568 uErr |= X86_TRAP_ERR_EXTERNAL;
5569 }
5570 }
5571
5572 pVCpu->iem.s.cXcptRecursions++;
5573 pVCpu->iem.s.uCurXcpt = u8Vector;
5574 pVCpu->iem.s.fCurXcpt = fFlags;
5575 pVCpu->iem.s.uCurXcptErr = uErr;
5576 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5577
5578 /*
5579 * Extensive logging.
5580 */
5581#if defined(LOG_ENABLED) && defined(IN_RING3)
5582 if (LogIs3Enabled())
5583 {
5584 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5585 PVM pVM = pVCpu->CTX_SUFF(pVM);
5586 char szRegs[4096];
5587 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5588 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5589 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5590 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5591 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5592 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5593 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5594 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5595 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5596 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5597 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5598 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5599 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5600 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5601 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5602 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5603 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5604 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5605 " efer=%016VR{efer}\n"
5606 " pat=%016VR{pat}\n"
5607 " sf_mask=%016VR{sf_mask}\n"
5608 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5609 " lstar=%016VR{lstar}\n"
5610 " star=%016VR{star} cstar=%016VR{cstar}\n"
5611 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5612 );
5613
5614 char szInstr[256];
5615 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5616 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5617 szInstr, sizeof(szInstr), NULL);
5618 Log3(("%s%s\n", szRegs, szInstr));
5619 }
5620#endif /* LOG_ENABLED */
5621
5622 /*
5623 * Call the mode specific worker function.
5624 */
5625 VBOXSTRICTRC rcStrict;
5626 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5627 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5628 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5629 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5630 else
5631 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5632
5633 /* Flush the prefetch buffer. */
5634#ifdef IEM_WITH_CODE_TLB
5635 pVCpu->iem.s.pbInstrBuf = NULL;
5636#else
5637 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5638#endif
5639
5640 /*
5641 * Unwind.
5642 */
5643 pVCpu->iem.s.cXcptRecursions--;
5644 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5645 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5646 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5647 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5648 pVCpu->iem.s.cXcptRecursions + 1));
5649 return rcStrict;
5650}
5651
5652#ifdef IEM_WITH_SETJMP
5653/**
5654 * See iemRaiseXcptOrInt. Will not return.
5655 */
5656IEM_STATIC DECL_NO_RETURN(void)
5657iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
5658 uint8_t cbInstr,
5659 uint8_t u8Vector,
5660 uint32_t fFlags,
5661 uint16_t uErr,
5662 uint64_t uCr2)
5663{
5664 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5665 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5666}
5667#endif
5668
5669
5670/** \#DE - 00. */
5671DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPUCC pVCpu)
5672{
5673 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5674}
5675
5676
5677/** \#DB - 01.
5678  * @note This automatically clears DR7.GD. */
5679DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPUCC pVCpu)
5680{
5681 /** @todo set/clear RF. */
5682 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5683 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5684}
5685
5686
5687/** \#BR - 05. */
5688DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu)
5689{
5690 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5691}
5692
5693
5694/** \#UD - 06. */
5695DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPUCC pVCpu)
5696{
5697 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5698}
5699
5700
5701/** \#NM - 07. */
5702DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu)
5703{
5704 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5705}
5706
5707
5708/** \#TS(err) - 0a. */
5709DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5710{
5711 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5712}
5713
5714
5715/** \#TS(tr) - 0a. */
5716DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu)
5717{
5718 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5719 pVCpu->cpum.GstCtx.tr.Sel, 0);
5720}
5721
5722
5723/** \#TS(0) - 0a. */
5724DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu)
5725{
5726 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5727 0, 0);
5728}
5729
5730
5731/** \#TS(err) - 0a. */
5732DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5733{
5734 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5735 uSel & X86_SEL_MASK_OFF_RPL, 0);
5736}
5737
5738
5739/** \#NP(err) - 0b. */
5740DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5741{
5742 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5743}
5744
5745
5746/** \#NP(sel) - 0b. */
5747DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5748{
5749 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5750 uSel & ~X86_SEL_RPL, 0);
5751}
5752
5753
5754/** \#SS(seg) - 0c. */
5755DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5756{
5757 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5758 uSel & ~X86_SEL_RPL, 0);
5759}
5760
5761
5762/** \#SS(err) - 0c. */
5763DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5764{
5765 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5766}
5767
5768
5769/** \#GP(n) - 0d. */
5770DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr)
5771{
5772 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5773}
5774
5775
5776/** \#GP(0) - 0d. */
5777DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu)
5778{
5779 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5780}
5781
5782#ifdef IEM_WITH_SETJMP
5783/** \#GP(0) - 0d. */
5784DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu)
5785{
5786 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5787}
5788#endif
5789
5790
5791/** \#GP(sel) - 0d. */
5792DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel)
5793{
5794 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5795 Sel & ~X86_SEL_RPL, 0);
5796}
5797
5798
5799/** \#GP(0) - 0d. */
5800DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPUCC pVCpu)
5801{
5802 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5803}
5804
5805
5806/** \#GP(sel) - 0d. */
5807DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5808{
5809 NOREF(iSegReg); NOREF(fAccess);
5810 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5811 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5812}
5813
5814#ifdef IEM_WITH_SETJMP
5815/** \#GP(sel) - 0d, longjmp. */
5816DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5817{
5818 NOREF(iSegReg); NOREF(fAccess);
5819 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5820 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5821}
5822#endif
5823
5824/** \#GP(sel) - 0d. */
5825DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel)
5826{
5827 NOREF(Sel);
5828 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5829}
5830
5831#ifdef IEM_WITH_SETJMP
5832/** \#GP(sel) - 0d, longjmp. */
5833DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel)
5834{
5835 NOREF(Sel);
5836 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5837}
5838#endif
5839
5840
5841/** \#GP(sel) - 0d. */
5842DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5843{
5844 NOREF(iSegReg); NOREF(fAccess);
5845 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5846}
5847
5848#ifdef IEM_WITH_SETJMP
5849/** \#GP(sel) - 0d, longjmp. */
5850DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg,
5851 uint32_t fAccess)
5852{
5853 NOREF(iSegReg); NOREF(fAccess);
5854 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5855}
5856#endif
5857
5858
5859/** \#PF(n) - 0e. */
5860DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5861{
5862 uint16_t uErr;
5863 switch (rc)
5864 {
5865 case VERR_PAGE_NOT_PRESENT:
5866 case VERR_PAGE_TABLE_NOT_PRESENT:
5867 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5868 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5869 uErr = 0;
5870 break;
5871
5872 default:
5873 AssertMsgFailed(("%Rrc\n", rc));
5874 RT_FALL_THRU();
5875 case VERR_ACCESS_DENIED:
5876 uErr = X86_TRAP_PF_P;
5877 break;
5878
5879 /** @todo reserved */
5880 }
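    /* The remaining #PF error code bits are assembled below: U/S (bit 2) for
       CPL 3, I/D (bit 4) for instruction fetches with NX enabled, and W/R
       (bit 1) for writes; P (bit 0) was decided by the switch above. */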
5881
5882 if (pVCpu->iem.s.uCpl == 3)
5883 uErr |= X86_TRAP_PF_US;
5884
5885 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5886 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5887 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5888 uErr |= X86_TRAP_PF_ID;
5889
5890#if 0 /* This is so much non-sense, really. Why was it done like that? */
5891 /* Note! RW access callers reporting a WRITE protection fault, will clear
5892 the READ flag before calling. So, read-modify-write accesses (RW)
5893 can safely be reported as READ faults. */
5894 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5895 uErr |= X86_TRAP_PF_RW;
5896#else
5897 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5898 {
5899 if (!(fAccess & IEM_ACCESS_TYPE_READ))
5900 uErr |= X86_TRAP_PF_RW;
5901 }
5902#endif
5903
5904 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5905 uErr, GCPtrWhere);
5906}
5907
5908#ifdef IEM_WITH_SETJMP
5909/** \#PF(n) - 0e, longjmp. */
5910IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5911{
5912 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5913}
5914#endif
5915
5916
5917/** \#MF(0) - 10. */
5918DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPUCC pVCpu)
5919{
5920 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5921}
5922
5923
5924/** \#AC(0) - 11. */
5925DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPUCC pVCpu)
5926{
5927 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5928}
5929
5930
5931/**
5932 * Macro for calling iemCImplRaiseDivideError().
5933 *
5934 * This enables us to add/remove arguments and force different levels of
5935 * inlining as we wish.
5936 *
5937 * @return Strict VBox status code.
5938 */
5939#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5940IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5941{
5942 NOREF(cbInstr);
5943 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5944}
5945
5946
5947/**
5948 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5949 *
5950 * This enables us to add/remove arguments and force different levels of
5951 * inlining as we wish.
5952 *
5953 * @return Strict VBox status code.
5954 */
5955#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5956IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5957{
5958 NOREF(cbInstr);
5959 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5960}
5961
5962
5963/**
5964 * Macro for calling iemCImplRaiseInvalidOpcode().
5965 *
5966 * This enables us to add/remove arguments and force different levels of
5967 * inlining as we wish.
5968 *
5969 * @return Strict VBox status code.
5970 */
5971#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5972IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5973{
5974 NOREF(cbInstr);
5975 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5976}
5977
5978
5979/** @} */
5980
5981
5982/*
5983 *
5984  * Helper routines.
5985  * Helper routines.
5986  * Helper routines.
5987 *
5988 */
5989
5990/**
5991 * Recalculates the effective operand size.
5992 *
5993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5994 */
5995IEM_STATIC void iemRecalEffOpSize(PVMCPUCC pVCpu)
5996{
5997 switch (pVCpu->iem.s.enmCpuMode)
5998 {
5999 case IEMMODE_16BIT:
6000 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6001 break;
6002 case IEMMODE_32BIT:
6003 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6004 break;
6005 case IEMMODE_64BIT:
6006 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6007 {
6008 case 0:
6009 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6010 break;
6011 case IEM_OP_PRF_SIZE_OP:
6012 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6013 break;
6014 case IEM_OP_PRF_SIZE_REX_W:
6015 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6016 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6017 break;
6018 }
6019 break;
6020 default:
6021 AssertFailed();
6022 }
6023}
6024
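/*
 * A minimal sketch of the operand size rules iemRecalEffOpSize implements,
 * using plain integers instead of the IEM types (illustrative only, not
 * compiled):
 *
 *   unsigned EffOpSizeBits(unsigned cBitsMode, unsigned cBitsDefault,
 *                          int fOpSizePrefix, int fRexW)
 *   {
 *       if (cBitsMode == 64)                      // REX.W wins over 0x66.
 *           return fRexW ? 64 : fOpSizePrefix ? 16 : cBitsDefault;
 *       if (cBitsMode == 32)                      // 0x66 selects 16-bit.
 *           return fOpSizePrefix ? 16 : 32;
 *       return fOpSizePrefix ? 32 : 16;           // 16-bit code: 0x66 -> 32-bit.
 *   }
 */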
6025
6026/**
6027 * Sets the default operand size to 64-bit and recalculates the effective
6028 * operand size.
6029 *
6030 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6031 */
6032IEM_STATIC void iemRecalEffOpSize64Default(PVMCPUCC pVCpu)
6033{
6034 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6035 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6036 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6037 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6038 else
6039 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6040}
6041
6042
6043/*
6044 *
6045 * Common opcode decoders.
6046 * Common opcode decoders.
6047 * Common opcode decoders.
6048 *
6049 */
6050//#include <iprt/mem.h>
6051
6052/**
6053 * Used to add extra details about a stub case.
6054 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6055 */
6056IEM_STATIC void iemOpStubMsg2(PVMCPUCC pVCpu)
6057{
6058#if defined(LOG_ENABLED) && defined(IN_RING3)
6059 PVM pVM = pVCpu->CTX_SUFF(pVM);
6060 char szRegs[4096];
6061 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6062 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6063 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6064 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6065 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6066 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6067 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6068 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6069 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6070 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6071 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6072 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6073 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6074 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6075 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6076 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6077 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6078 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6079 " efer=%016VR{efer}\n"
6080 " pat=%016VR{pat}\n"
6081 " sf_mask=%016VR{sf_mask}\n"
6082 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6083 " lstar=%016VR{lstar}\n"
6084 " star=%016VR{star} cstar=%016VR{cstar}\n"
6085 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6086 );
6087
6088 char szInstr[256];
6089 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6090 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6091 szInstr, sizeof(szInstr), NULL);
6092
6093 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6094#else
6095 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
6096#endif
6097}
6098
6099/**
6100 * Complains about a stub.
6101 *
6102 * Providing two versions of this macro, one for daily use and one for use when
6103 * working on IEM.
6104 */
6105#if 0
6106# define IEMOP_BITCH_ABOUT_STUB() \
6107 do { \
6108 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6109 iemOpStubMsg2(pVCpu); \
6110 RTAssertPanic(); \
6111 } while (0)
6112#else
6113# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6114#endif
6115
6116/** Stubs an opcode. */
6117#define FNIEMOP_STUB(a_Name) \
6118 FNIEMOP_DEF(a_Name) \
6119 { \
6120 RT_NOREF_PV(pVCpu); \
6121 IEMOP_BITCH_ABOUT_STUB(); \
6122 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6123 } \
6124 typedef int ignore_semicolon
6125
6126/** Stubs an opcode. */
6127#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6128 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6129 { \
6130 RT_NOREF_PV(pVCpu); \
6131 RT_NOREF_PV(a_Name0); \
6132 IEMOP_BITCH_ABOUT_STUB(); \
6133 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6134 } \
6135 typedef int ignore_semicolon
6136
6137/** Stubs an opcode which currently should raise \#UD. */
6138#define FNIEMOP_UD_STUB(a_Name) \
6139 FNIEMOP_DEF(a_Name) \
6140 { \
6141 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6142 return IEMOP_RAISE_INVALID_OPCODE(); \
6143 } \
6144 typedef int ignore_semicolon
6145
6146/** Stubs an opcode which currently should raise \#UD. */
6147#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6148 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6149 { \
6150 RT_NOREF_PV(pVCpu); \
6151 RT_NOREF_PV(a_Name0); \
6152 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6153 return IEMOP_RAISE_INVALID_OPCODE(); \
6154 } \
6155 typedef int ignore_semicolon
6156
6157
6158
6159/** @name Register Access.
6160 * @{
6161 */
6162
6163/**
6164 * Gets a reference (pointer) to the specified hidden segment register.
6165 *
6166 * @returns Hidden register reference.
6167 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6168 * @param iSegReg The segment register.
6169 */
6170IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg)
6171{
6172 Assert(iSegReg < X86_SREG_COUNT);
6173 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6174 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6175
6176 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6177 return pSReg;
6178}
6179
6180
6181/**
6182 * Ensures that the given hidden segment register is up to date.
6183 *
6184 * @returns Hidden register reference.
6185 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6186 * @param pSReg The segment register.
6187 */
6188IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
6189{
6190 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6191 NOREF(pVCpu);
6192 return pSReg;
6193}
6194
6195
6196/**
6197 * Gets a reference (pointer) to the specified segment register (the selector
6198 * value).
6199 *
6200 * @returns Pointer to the selector variable.
6201 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6202 * @param iSegReg The segment register.
6203 */
6204DECLINLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg)
6205{
6206 Assert(iSegReg < X86_SREG_COUNT);
6207 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6208 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6209}
6210
6211
6212/**
6213 * Fetches the selector value of a segment register.
6214 *
6215 * @returns The selector value.
6216 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6217 * @param iSegReg The segment register.
6218 */
6219DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg)
6220{
6221 Assert(iSegReg < X86_SREG_COUNT);
6222 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6223 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6224}
6225
6226
6227/**
6228 * Fetches the base address value of a segment register.
6229 *
6230 * @returns The base address value.
6231 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6232 * @param iSegReg The segment register.
6233 */
6234DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg)
6235{
6236 Assert(iSegReg < X86_SREG_COUNT);
6237 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6238 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6239}
6240
6241
6242/**
6243 * Gets a reference (pointer) to the specified general purpose register.
6244 *
6245 * @returns Register reference.
6246 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6247 * @param iReg The general purpose register.
6248 */
6249DECLINLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg)
6250{
6251 Assert(iReg < 16);
6252 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6253}
6254
6255
6256/**
6257 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6258 *
6259 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6260 *
6261 * @returns Register reference.
6262 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6263 * @param iReg The register.
6264 */
6265DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg)
6266{
6267 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6268 {
6269 Assert(iReg < 16);
6270 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6271 }
6272 /* high 8-bit register. */
6273 Assert(iReg < 8);
6274 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6275}
6276
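/*
 * Worked example for iemGRegRefU8 (illustrative): without a REX prefix the
 * 8-bit register indices 4..7 encode AH/CH/DH/BH, i.e. the high byte of the
 * first four GPRs, which is why the index is masked with 3 above; with any
 * REX prefix the same indices encode SPL/BPL/SIL/DIL instead.
 *
 *   iReg   no REX prefix          with REX prefix
 *    0     AL  (aGRegs[0].u8)     AL   (aGRegs[0].u8)
 *    4     AH  (aGRegs[0].bHi)    SPL  (aGRegs[4].u8)
 *    6     DH  (aGRegs[2].bHi)    SIL  (aGRegs[6].u8)
 *   12     -                      R12B (aGRegs[12].u8)
 */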
6277
6278/**
6279 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6280 *
6281 * @returns Register reference.
6282 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6283 * @param iReg The register.
6284 */
6285DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg)
6286{
6287 Assert(iReg < 16);
6288 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6289}
6290
6291
6292/**
6293 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6294 *
6295 * @returns Register reference.
6296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6297 * @param iReg The register.
6298 */
6299DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg)
6300{
6301 Assert(iReg < 16);
6302 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6303}
6304
6305
6306/**
6307 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6308 *
6309 * @returns Register reference.
6310 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6311 * @param iReg The register.
6312 */
6313DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg)
6314{
6315 Assert(iReg < 16);
6316 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6317}
6318
6319
6320/**
6321 * Gets a reference (pointer) to the specified segment register's base address.
6322 *
6323 * @returns Segment register base address reference.
6324 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6325 * @param iSegReg The segment selector.
6326 */
6327DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg)
6328{
6329 Assert(iSegReg < X86_SREG_COUNT);
6330 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6331 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6332}
6333
6334
6335/**
6336 * Fetches the value of an 8-bit general purpose register.
6337 *
6338 * @returns The register value.
6339 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6340 * @param iReg The register.
6341 */
6342DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg)
6343{
6344 return *iemGRegRefU8(pVCpu, iReg);
6345}
6346
6347
6348/**
6349 * Fetches the value of a 16-bit general purpose register.
6350 *
6351 * @returns The register value.
6352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6353 * @param iReg The register.
6354 */
6355DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg)
6356{
6357 Assert(iReg < 16);
6358 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6359}
6360
6361
6362/**
6363 * Fetches the value of a 32-bit general purpose register.
6364 *
6365 * @returns The register value.
6366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6367 * @param iReg The register.
6368 */
6369DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg)
6370{
6371 Assert(iReg < 16);
6372 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6373}
6374
6375
6376/**
6377 * Fetches the value of a 64-bit general purpose register.
6378 *
6379 * @returns The register value.
6380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6381 * @param iReg The register.
6382 */
6383DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg)
6384{
6385 Assert(iReg < 16);
6386 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6387}
6388
6389
6390/**
6391 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6392 *
6393 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6394 * segment limit.
6395 *
6396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6397 * @param offNextInstr The offset of the next instruction.
6398 */
6399IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr)
6400{
6401 switch (pVCpu->iem.s.enmEffOpSize)
6402 {
6403 case IEMMODE_16BIT:
6404 {
6405 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6406 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6407 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6408 return iemRaiseGeneralProtectionFault0(pVCpu);
6409 pVCpu->cpum.GstCtx.rip = uNewIp;
6410 break;
6411 }
6412
6413 case IEMMODE_32BIT:
6414 {
6415 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6416 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6417
6418 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6419 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6420 return iemRaiseGeneralProtectionFault0(pVCpu);
6421 pVCpu->cpum.GstCtx.rip = uNewEip;
6422 break;
6423 }
6424
6425 case IEMMODE_64BIT:
6426 {
6427 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6428
6429 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6430 if (!IEM_IS_CANONICAL(uNewRip))
6431 return iemRaiseGeneralProtectionFault0(pVCpu);
6432 pVCpu->cpum.GstCtx.rip = uNewRip;
6433 break;
6434 }
6435
6436 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6437 }
6438
6439 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6440
6441#ifndef IEM_WITH_CODE_TLB
6442 /* Flush the prefetch buffer. */
6443 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6444#endif
6445
6446 return VINF_SUCCESS;
6447}
6448
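/*
 * Worked example (illustrative): a 2 byte short jump at IP=0x0100 in 16-bit
 * code with an 8-bit displacement of -4 (0xfc) lands at
 * 0x0100 + 2 - 4 = 0x00fe, which is then checked against CS.limit; in 64-bit
 * code the same arithmetic is applied to RIP and the result is checked for
 * canonicality instead.
 */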
6449
6450/**
6451 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6452 *
6453 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6454 * segment limit.
6455 *
6456 * @returns Strict VBox status code.
6457 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6458 * @param offNextInstr The offset of the next instruction.
6459 */
6460IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr)
6461{
6462 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6463
6464 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6465 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6466 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6467 return iemRaiseGeneralProtectionFault0(pVCpu);
6468 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6469 pVCpu->cpum.GstCtx.rip = uNewIp;
6470 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6471
6472#ifndef IEM_WITH_CODE_TLB
6473 /* Flush the prefetch buffer. */
6474 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6475#endif
6476
6477 return VINF_SUCCESS;
6478}
6479
6480
6481/**
6482 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6483 *
6484 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6485 * segment limit.
6486 *
6487 * @returns Strict VBox status code.
6488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6489 * @param offNextInstr The offset of the next instruction.
6490 */
6491IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr)
6492{
6493 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6494
6495 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6496 {
6497 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6498
6499 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6500 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6501 return iemRaiseGeneralProtectionFault0(pVCpu);
6502 pVCpu->cpum.GstCtx.rip = uNewEip;
6503 }
6504 else
6505 {
6506 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6507
6508 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6509 if (!IEM_IS_CANONICAL(uNewRip))
6510 return iemRaiseGeneralProtectionFault0(pVCpu);
6511 pVCpu->cpum.GstCtx.rip = uNewRip;
6512 }
6513 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6514
6515#ifndef IEM_WITH_CODE_TLB
6516 /* Flush the prefetch buffer. */
6517 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6518#endif
6519
6520 return VINF_SUCCESS;
6521}
6522
6523
6524/**
6525 * Performs a near jump to the specified address.
6526 *
6527 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6528 * segment limit.
6529 *
6530 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6531 * @param uNewRip The new RIP value.
6532 */
6533IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip)
6534{
6535 switch (pVCpu->iem.s.enmEffOpSize)
6536 {
6537 case IEMMODE_16BIT:
6538 {
6539 Assert(uNewRip <= UINT16_MAX);
6540 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6541 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6542 return iemRaiseGeneralProtectionFault0(pVCpu);
6543 /** @todo Test 16-bit jump in 64-bit mode. */
6544 pVCpu->cpum.GstCtx.rip = uNewRip;
6545 break;
6546 }
6547
6548 case IEMMODE_32BIT:
6549 {
6550 Assert(uNewRip <= UINT32_MAX);
6551 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6552 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6553
6554 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6555 return iemRaiseGeneralProtectionFault0(pVCpu);
6556 pVCpu->cpum.GstCtx.rip = uNewRip;
6557 break;
6558 }
6559
6560 case IEMMODE_64BIT:
6561 {
6562 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6563
6564 if (!IEM_IS_CANONICAL(uNewRip))
6565 return iemRaiseGeneralProtectionFault0(pVCpu);
6566 pVCpu->cpum.GstCtx.rip = uNewRip;
6567 break;
6568 }
6569
6570 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6571 }
6572
6573 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6574
6575#ifndef IEM_WITH_CODE_TLB
6576 /* Flush the prefetch buffer. */
6577 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6578#endif
6579
6580 return VINF_SUCCESS;
6581}
6582
6583
6584/**
6585 * Gets the address of the top of the stack.
6586 *
6587 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6588 */
6589DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6590{
6591 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6592 return pVCpu->cpum.GstCtx.rsp;
6593 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6594 return pVCpu->cpum.GstCtx.esp;
6595 return pVCpu->cpum.GstCtx.sp;
6596}
6597
6598
6599/**
6600 * Updates the RIP/EIP/IP to point to the next instruction.
6601 *
6602 * This function leaves the EFLAGS.RF flag alone.
6603 *
6604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6605 * @param cbInstr The number of bytes to add.
6606 */
6607IEM_STATIC void iemRegAddToRipKeepRF(PVMCPUCC pVCpu, uint8_t cbInstr)
6608{
6609 switch (pVCpu->iem.s.enmCpuMode)
6610 {
6611 case IEMMODE_16BIT:
6612 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6613 pVCpu->cpum.GstCtx.eip += cbInstr;
6614 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6615 break;
6616
6617 case IEMMODE_32BIT:
6618 pVCpu->cpum.GstCtx.eip += cbInstr;
6619 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6620 break;
6621
6622 case IEMMODE_64BIT:
6623 pVCpu->cpum.GstCtx.rip += cbInstr;
6624 break;
6625 default: AssertFailed();
6626 }
6627}
6628
6629
6630#if 0
6631/**
6632 * Updates the RIP/EIP/IP to point to the next instruction.
6633 *
6634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6635 */
6636IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPUCC pVCpu)
6637{
6638 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6639}
6640#endif
6641
6642
6643
6644/**
6645 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6646 *
6647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6648 * @param cbInstr The number of bytes to add.
6649 */
6650IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPUCC pVCpu, uint8_t cbInstr)
6651{
6652 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6653
6654 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6655#if ARCH_BITS >= 64
6656 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6657 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6658 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6659#else
6660 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6661 pVCpu->cpum.GstCtx.rip += cbInstr;
6662 else
6663 pVCpu->cpum.GstCtx.eip += cbInstr;
6664#endif
6665}
6666
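/*
 * Sketch of the branch-free RIP advance used above (illustrative, not
 * compiled): the per-mode mask truncates the sum to 32 bits for 16-bit and
 * 32-bit code (the 16-bit case relies on RIP already being <= 0xffff, see the
 * assertion) and leaves it untouched for 64-bit code.
 *
 *   uint64_t AdvanceRip(uint64_t uRip, uint8_t cbInstr, unsigned iMode) // 0=16, 1=32, 2=64 bit
 *   {
 *       static uint64_t const s_aMasks[3] = { UINT32_MAX, UINT32_MAX, UINT64_MAX };
 *       return (uRip + cbInstr) & s_aMasks[iMode];
 *   }
 */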
6667
6668/**
6669 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6670 *
6671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6672 */
6673IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPUCC pVCpu)
6674{
6675 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6676}
6677
6678
6679/**
6680 * Adds to the stack pointer.
6681 *
6682 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6683 * @param cbToAdd The number of bytes to add (8-bit!).
6684 */
6685DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd)
6686{
6687 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6688 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6689 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6690 pVCpu->cpum.GstCtx.esp += cbToAdd;
6691 else
6692 pVCpu->cpum.GstCtx.sp += cbToAdd;
6693}
6694
6695
6696/**
6697 * Subtracts from the stack pointer.
6698 *
6699 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6700 * @param cbToSub The number of bytes to subtract (8-bit!).
6701 */
6702DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub)
6703{
6704 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6705 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6706 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6707 pVCpu->cpum.GstCtx.esp -= cbToSub;
6708 else
6709 pVCpu->cpum.GstCtx.sp -= cbToSub;
6710}
6711
6712
6713/**
6714 * Adds to the temporary stack pointer.
6715 *
6716 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6717 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6718 * @param cbToAdd The number of bytes to add (16-bit).
6719 */
6720DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6721{
6722 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6723 pTmpRsp->u += cbToAdd;
6724 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6725 pTmpRsp->DWords.dw0 += cbToAdd;
6726 else
6727 pTmpRsp->Words.w0 += cbToAdd;
6728}
6729
6730
6731/**
6732 * Subtracts from the temporary stack pointer.
6733 *
6734 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6735 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6736 * @param cbToSub The number of bytes to subtract.
6737 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6738 * expecting that.
6739 */
6740DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6741{
6742 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6743 pTmpRsp->u -= cbToSub;
6744 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6745 pTmpRsp->DWords.dw0 -= cbToSub;
6746 else
6747 pTmpRsp->Words.w0 -= cbToSub;
6748}
6749
6750
6751/**
6752 * Calculates the effective stack address for a push of the specified size as
6753 * well as the new RSP value (upper bits may be masked).
6754 *
6755 * @returns Effective stack address for the push.
6756 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6757 * @param cbItem The size of the stack item to push.
6758 * @param puNewRsp Where to return the new RSP value.
6759 */
6760DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6761{
6762 RTUINT64U uTmpRsp;
6763 RTGCPTR GCPtrTop;
6764 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6765
6766 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6767 GCPtrTop = uTmpRsp.u -= cbItem;
6768 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6769 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6770 else
6771 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6772 *puNewRsp = uTmpRsp.u;
6773 return GCPtrTop;
6774}
6775
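/*
 * Typical use of iemRegGetRspForPush (illustrative sketch; the store helper
 * and status handling are simplified): the helper only calculates, the caller
 * writes the item and commits the new RSP afterwards, so a faulting write
 * leaves RSP unchanged.
 *
 *   uint64_t     uNewRsp;
 *   RTGCPTR      GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(uint32_t), &uNewRsp);
 *   VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, X86_SREG_SS, GCPtrTop, u32Value);
 *   if (rcStrict == VINF_SUCCESS)
 *       pVCpu->cpum.GstCtx.rsp = uNewRsp;  // Commit only on success.
 *   return rcStrict;
 */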
6776
6777/**
6778 * Gets the current stack pointer and calculates the value after a pop of the
6779 * specified size.
6780 *
6781 * @returns Current stack pointer.
6782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6783 * @param cbItem The size of the stack item to pop.
6784 * @param puNewRsp Where to return the new RSP value.
6785 */
6786DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6787{
6788 RTUINT64U uTmpRsp;
6789 RTGCPTR GCPtrTop;
6790 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6791
6792 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6793 {
6794 GCPtrTop = uTmpRsp.u;
6795 uTmpRsp.u += cbItem;
6796 }
6797 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6798 {
6799 GCPtrTop = uTmpRsp.DWords.dw0;
6800 uTmpRsp.DWords.dw0 += cbItem;
6801 }
6802 else
6803 {
6804 GCPtrTop = uTmpRsp.Words.w0;
6805 uTmpRsp.Words.w0 += cbItem;
6806 }
6807 *puNewRsp = uTmpRsp.u;
6808 return GCPtrTop;
6809}
6810
6811
6812/**
6813 * Calculates the effective stack address for a push of the specified size as
6814 * well as the new temporary RSP value (upper bits may be masked).
6815 *
6816 * @returns Effective stack address for the push.
6817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6818 * @param pTmpRsp The temporary stack pointer. This is updated.
6819 * @param cbItem The size of the stack item to push.
6820 */
6821DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6822{
6823 RTGCPTR GCPtrTop;
6824
6825 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6826 GCPtrTop = pTmpRsp->u -= cbItem;
6827 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6828 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6829 else
6830 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6831 return GCPtrTop;
6832}
6833
6834
6835/**
6836 * Gets the effective stack address for a pop of the specified size and
6837 * calculates and updates the temporary RSP.
6838 *
6839 * @returns Current stack pointer.
6840 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6841 * @param pTmpRsp The temporary stack pointer. This is updated.
6842 * @param cbItem The size of the stack item to pop.
6843 */
6844DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6845{
6846 RTGCPTR GCPtrTop;
6847 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6848 {
6849 GCPtrTop = pTmpRsp->u;
6850 pTmpRsp->u += cbItem;
6851 }
6852 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6853 {
6854 GCPtrTop = pTmpRsp->DWords.dw0;
6855 pTmpRsp->DWords.dw0 += cbItem;
6856 }
6857 else
6858 {
6859 GCPtrTop = pTmpRsp->Words.w0;
6860 pTmpRsp->Words.w0 += cbItem;
6861 }
6862 return GCPtrTop;
6863}
6864
6865/** @} */
6866
6867
6868/** @name FPU access and helpers.
6869 *
6870 * @{
6871 */
6872
6873
6874/**
6875 * Hook for preparing to use the host FPU.
6876 *
6877 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6878 *
6879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6880 */
6881DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu)
6882{
6883#ifdef IN_RING3
6884 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6885#else
6886 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6887#endif
6888 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6889}
6890
6891
6892/**
6893 * Hook for preparing to use the host FPU for SSE.
6894 *
6895 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6896 *
6897 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6898 */
6899DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu)
6900{
6901 iemFpuPrepareUsage(pVCpu);
6902}
6903
6904
6905/**
6906 * Hook for preparing to use the host FPU for AVX.
6907 *
6908 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6909 *
6910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6911 */
6912DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu)
6913{
6914 iemFpuPrepareUsage(pVCpu);
6915}
6916
6917
6918/**
6919 * Hook for actualizing the guest FPU state before the interpreter reads it.
6920 *
6921 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6922 *
6923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6924 */
6925DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu)
6926{
6927#ifdef IN_RING3
6928 NOREF(pVCpu);
6929#else
6930 CPUMRZFpuStateActualizeForRead(pVCpu);
6931#endif
6932 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6933}
6934
6935
6936/**
6937 * Hook for actualizing the guest FPU state before the interpreter changes it.
6938 *
6939 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6940 *
6941 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6942 */
6943DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu)
6944{
6945#ifdef IN_RING3
6946 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6947#else
6948 CPUMRZFpuStateActualizeForChange(pVCpu);
6949#endif
6950 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6951}
6952
6953
6954/**
6955 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6956 * only.
6957 *
6958 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6959 *
6960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6961 */
6962DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu)
6963{
6964#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6965 NOREF(pVCpu);
6966#else
6967 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6968#endif
6969 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6970}
6971
6972
6973/**
6974 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6975 * read+write.
6976 *
6977 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6978 *
6979 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6980 */
6981DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu)
6982{
6983#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6984 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6985#else
6986 CPUMRZFpuStateActualizeForChange(pVCpu);
6987#endif
6988 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6989}
6990
6991
6992/**
6993 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6994 * only.
6995 *
6996 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6997 *
6998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6999 */
7000DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu)
7001{
7002#ifdef IN_RING3
7003 NOREF(pVCpu);
7004#else
7005 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7006#endif
7007 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7008}
7009
7010
7011/**
7012 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7013 * read+write.
7014 *
7015 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7016 *
7017 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7018 */
7019DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu)
7020{
7021#ifdef IN_RING3
7022 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7023#else
7024 CPUMRZFpuStateActualizeForChange(pVCpu);
7025#endif
7026 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7027}
7028
7029
7030/**
7031 * Stores a QNaN value into a FPU register.
7032 *
7033 * @param pReg Pointer to the register.
7034 */
7035DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7036{
7037 pReg->au32[0] = UINT32_C(0x00000000);
7038 pReg->au32[1] = UINT32_C(0xc0000000);
7039 pReg->au16[4] = UINT16_C(0xffff);
7040}
7041
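/*
 * The value stored above is the 80-bit "real indefinite" QNaN (illustrative
 * breakdown): au32[0]/au32[1] form the 64-bit mantissa 0xc000000000000000
 * (J-bit 63 and QNaN bit 62 set), while au16[4] holds the sign and exponent,
 * giving sign = 1 and exponent = 0x7fff.
 */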
7042
7043/**
7044 * Updates the FOP, FPU.CS and FPUIP registers.
7045 *
7046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7047 * @param pFpuCtx The FPU context.
7048 */
7049DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx)
7050{
7051 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7052 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7053 /** @todo x87.CS and FPUIP need to be kept separately. */
7054 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7055 {
7056 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7057 * happens in real mode here based on the fnsave and fnstenv images. */
7058 pFpuCtx->CS = 0;
7059 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7060 }
7061 else
7062 {
7063 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7064 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7065 }
7066}
7067
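/*
 * Worked example for the real/V86 mode branch above (illustrative): with
 * CS=0x1234 and IP=0x0100 the stored FPUIP is 0x0100 | (0x1234 << 4) =
 * 0x12440, i.e. the real mode linear address as recorded by 16-bit
 * fnsave/fnstenv images, while FPUCS itself is stored as zero (see the
 * testcase @todo above).
 */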
7068
7069/**
7070 * Updates the x87.DS and FPUDP registers.
7071 *
7072 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7073 * @param pFpuCtx The FPU context.
7074 * @param iEffSeg The effective segment register.
7075 * @param GCPtrEff The effective address relative to @a iEffSeg.
7076 */
7077DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7078{
7079 RTSEL sel;
7080 switch (iEffSeg)
7081 {
7082 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7083 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7084 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7085 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7086 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7087 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7088 default:
7089 AssertMsgFailed(("%d\n", iEffSeg));
7090 sel = pVCpu->cpum.GstCtx.ds.Sel;
7091 }
7092 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7093 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7094 {
7095 pFpuCtx->DS = 0;
7096 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7097 }
7098 else
7099 {
7100 pFpuCtx->DS = sel;
7101 pFpuCtx->FPUDP = GCPtrEff;
7102 }
7103}
7104
7105
7106/**
7107 * Rotates the stack registers in the push direction.
7108 *
7109 * @param pFpuCtx The FPU context.
7110 * @remarks This is a complete waste of time, but fxsave stores the registers in
7111 * stack order.
7112 */
7113DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7114{
7115 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7116 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7117 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7118 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7119 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7120 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7121 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7122 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7123 pFpuCtx->aRegs[0].r80 = r80Tmp;
7124}
7125
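/*
 * Why the rotation (illustrative): IEM keeps aRegs[i] == ST(i), i.e. relative
 * to TOP just like the fxsave image.  Decrementing TOP on a push therefore
 * shifts every register one ST slot up; the old ST(7) slot wraps into the new
 * ST(0), which the caller then overwrites with the pushed value.
 *
 *   before push:  ST0=a ST1=b ... ST7=h        (aRegs[] = a b c d e f g h)
 *   after push:   ST0=h ST1=a ST2=b ... ST7=g  (caller stores the new ST0)
 */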
7126
7127/**
7128 * Rotates the stack registers in the pop direction.
7129 *
7130 * @param pFpuCtx The FPU context.
7131 * @remarks This is a complete waste of time, but fxsave stores the registers in
7132 * stack order.
7133 */
7134DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7135{
7136 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7137 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7138 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7139 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7140 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7141 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7142 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7143 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7144 pFpuCtx->aRegs[7].r80 = r80Tmp;
7145}
7146
7147
7148/**
7149 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7150 * exception prevents it.
7151 *
7152 * @param pResult The FPU operation result to push.
7153 * @param pFpuCtx The FPU context.
7154 */
7155IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7156{
7157 /* Update FSW and bail if there are pending exceptions afterwards. */
7158 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7159 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7160 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7161 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7162 {
7163 pFpuCtx->FSW = fFsw;
7164 return;
7165 }
7166
7167 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7168 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7169 {
7170 /* All is fine, push the actual value. */
7171 pFpuCtx->FTW |= RT_BIT(iNewTop);
7172 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7173 }
7174 else if (pFpuCtx->FCW & X86_FCW_IM)
7175 {
7176 /* Masked stack overflow, push QNaN. */
7177 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7178 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7179 }
7180 else
7181 {
7182 /* Raise stack overflow, don't push anything. */
7183 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7184 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7185 return;
7186 }
7187
7188 fFsw &= ~X86_FSW_TOP_MASK;
7189 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7190 pFpuCtx->FSW = fFsw;
7191
7192 iemFpuRotateStackPush(pFpuCtx);
7193}
7194
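/*
 * A note on the TOP arithmetic above (illustrative): X86_FSW_TOP_SMASK is 7,
 * so (TOP + 7) & 7 is simply TOP - 1 modulo 8, i.e. the slot the pushed value
 * lands in.  For example TOP=0 gives iNewTop=7 and TOP=3 gives iNewTop=2.
 */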
7195
7196/**
7197 * Stores a result in a FPU register and updates the FSW and FTW.
7198 *
7199 * @param pFpuCtx The FPU context.
7200 * @param pResult The result to store.
7201 * @param iStReg Which FPU register to store it in.
7202 */
7203IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7204{
7205 Assert(iStReg < 8);
7206 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7207 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7208 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7209 pFpuCtx->FTW |= RT_BIT(iReg);
7210 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7211}
7212
7213
7214/**
7215 * Only updates the FPU status word (FSW) with the result of the current
7216 * instruction.
7217 *
7218 * @param pFpuCtx The FPU context.
7219 * @param u16FSW The FSW output of the current instruction.
7220 */
7221IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7222{
7223 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7224 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7225}
7226
7227
7228/**
7229 * Pops one item off the FPU stack if no pending exception prevents it.
7230 *
7231 * @param pFpuCtx The FPU context.
7232 */
7233IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7234{
7235 /* Check pending exceptions. */
7236 uint16_t uFSW = pFpuCtx->FSW;
7237 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7238 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7239 return;
7240
7241 /* TOP--. */
7242 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7243 uFSW &= ~X86_FSW_TOP_MASK;
7244 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7245 pFpuCtx->FSW = uFSW;
7246
7247 /* Mark the previous ST0 as empty. */
7248 iOldTop >>= X86_FSW_TOP_SHIFT;
7249 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7250
7251 /* Rotate the registers. */
7252 iemFpuRotateStackPop(pFpuCtx);
7253}
7254
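/*
 * A note on the TOP arithmetic above (illustrative): the TOP field is 3 bits
 * wide, so adding 9 << X86_FSW_TOP_SHIFT and masking with X86_FSW_TOP_MASK is
 * TOP + 1 modulo 8 without ever leaving the field; TOP=7 wraps back to 0.
 */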
7255
7256/**
7257 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7258 *
7259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7260 * @param pResult The FPU operation result to push.
7261 */
7262IEM_STATIC void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult)
7263{
7264 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7265 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7266 iemFpuMaybePushResult(pResult, pFpuCtx);
7267}
7268
7269
7270/**
7271 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7272 * and sets FPUDP and FPUDS.
7273 *
7274 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7275 * @param pResult The FPU operation result to push.
7276 * @param iEffSeg The effective segment register.
7277 * @param GCPtrEff The effective address relative to @a iEffSeg.
7278 */
7279IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7280{
7281 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7282 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7283 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7284 iemFpuMaybePushResult(pResult, pFpuCtx);
7285}
7286
7287
7288/**
7289 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7290 * unless a pending exception prevents it.
7291 *
7292 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7293 * @param pResult The FPU operation result to store and push.
7294 */
7295IEM_STATIC void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult)
7296{
7297 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7298 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7299
7300 /* Update FSW and bail if there are pending exceptions afterwards. */
7301 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7302 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7303 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7304 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7305 {
7306 pFpuCtx->FSW = fFsw;
7307 return;
7308 }
7309
7310 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7311 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7312 {
7313 /* All is fine, push the actual value. */
7314 pFpuCtx->FTW |= RT_BIT(iNewTop);
7315 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7316 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7317 }
7318 else if (pFpuCtx->FCW & X86_FCW_IM)
7319 {
7320 /* Masked stack overflow, push QNaN. */
7321 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7322 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7323 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7324 }
7325 else
7326 {
7327 /* Raise stack overflow, don't push anything. */
7328 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7329 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7330 return;
7331 }
7332
7333 fFsw &= ~X86_FSW_TOP_MASK;
7334 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7335 pFpuCtx->FSW = fFsw;
7336
7337 iemFpuRotateStackPush(pFpuCtx);
7338}
7339
7340
7341/**
7342 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7343 * FOP.
7344 *
7345 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7346 * @param pResult The result to store.
7347 * @param iStReg Which FPU register to store it in.
7348 */
7349IEM_STATIC void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7350{
7351 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7352 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7353 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7354}
7355
7356
7357/**
7358 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7359 * FOP, and then pops the stack.
7360 *
7361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7362 * @param pResult The result to store.
7363 * @param iStReg Which FPU register to store it in.
7364 */
7365IEM_STATIC void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7366{
7367 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7368 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7369 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7370 iemFpuMaybePopOne(pFpuCtx);
7371}
7372
7373
7374/**
7375 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7376 * FPUDP, and FPUDS.
7377 *
7378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7379 * @param pResult The result to store.
7380 * @param iStReg Which FPU register to store it in.
7381 * @param iEffSeg The effective memory operand selector register.
7382 * @param GCPtrEff The effective memory operand offset.
7383 */
7384IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7385 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7386{
7387 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7388 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7389 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7390 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7391}
7392
7393
7394/**
7395 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7396 * FPUDP, and FPUDS, and then pops the stack.
7397 *
7398 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7399 * @param pResult The result to store.
7400 * @param iStReg Which FPU register to store it in.
7401 * @param iEffSeg The effective memory operand selector register.
7402 * @param GCPtrEff The effective memory operand offset.
7403 */
7404IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
7405 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7406{
7407 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7408 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7409 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7410 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7411 iemFpuMaybePopOne(pFpuCtx);
7412}
7413
7414
7415/**
7416 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7417 *
7418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7419 */
7420IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu)
7421{
7422 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7423 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7424}
7425
7426
7427/**
7428 * Marks the specified stack register as free (for FFREE).
7429 *
7430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7431 * @param iStReg The register to free.
7432 */
7433IEM_STATIC void iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg)
7434{
7435 Assert(iStReg < 8);
7436 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7437 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7438 pFpuCtx->FTW &= ~RT_BIT(iReg);
7439}
7440
7441
7442/**
7443 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7444 *
7445 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7446 */
7447IEM_STATIC void iemFpuStackIncTop(PVMCPUCC pVCpu)
7448{
7449 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7450 uint16_t uFsw = pFpuCtx->FSW;
7451 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7452 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7453 uFsw &= ~X86_FSW_TOP_MASK;
7454 uFsw |= uTop;
7455 pFpuCtx->FSW = uFsw;
7456}
7457
7458
7459/**
7460 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7461 *
7462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7463 */
7464IEM_STATIC void iemFpuStackDecTop(PVMCPUCC pVCpu)
7465{
7466 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7467 uint16_t uFsw = pFpuCtx->FSW;
7468 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7469 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7470 uFsw &= ~X86_FSW_TOP_MASK;
7471 uFsw |= uTop;
7472 pFpuCtx->FSW = uFsw;
7473}
7474
7475
7476/**
7477 * Updates the FSW, FOP, FPUIP, and FPUCS.
7478 *
7479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7480 * @param u16FSW The FSW from the current instruction.
7481 */
7482IEM_STATIC void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW)
7483{
7484 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7485 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7486 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7487}
7488
7489
7490/**
7491 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7492 *
7493 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7494 * @param u16FSW The FSW from the current instruction.
7495 */
7496IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW)
7497{
7498 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7499 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7500 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7501 iemFpuMaybePopOne(pFpuCtx);
7502}
7503
7504
7505/**
7506 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7507 *
7508 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7509 * @param u16FSW The FSW from the current instruction.
7510 * @param iEffSeg The effective memory operand selector register.
7511 * @param GCPtrEff The effective memory operand offset.
7512 */
7513IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7514{
7515 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7516 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7517 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7518 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7519}
7520
7521
7522/**
7523 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7524 *
7525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7526 * @param u16FSW The FSW from the current instruction.
7527 */
7528IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW)
7529{
7530 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7531 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7532 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7533 iemFpuMaybePopOne(pFpuCtx);
7534 iemFpuMaybePopOne(pFpuCtx);
7535}
7536
7537
7538/**
7539 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7540 *
7541 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7542 * @param u16FSW The FSW from the current instruction.
7543 * @param iEffSeg The effective memory operand selector register.
7544 * @param GCPtrEff The effective memory operand offset.
7545 */
7546IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7547{
7548 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7549 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7550 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7551 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7552 iemFpuMaybePopOne(pFpuCtx);
7553}
7554
7555
7556/**
7557 * Worker routine for raising an FPU stack underflow exception.
7558 *
7559 * @param pFpuCtx The FPU context.
7560 * @param iStReg The stack register being accessed.
7561 */
7562IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7563{
7564 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7565 if (pFpuCtx->FCW & X86_FCW_IM)
7566 {
7567 /* Masked underflow. */
7568 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7569 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7570 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7571 if (iStReg != UINT8_MAX)
7572 {
7573 pFpuCtx->FTW |= RT_BIT(iReg);
7574 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7575 }
7576 }
7577 else
7578 {
7579 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7580 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7581 }
7582}
7583
7584
7585/**
7586 * Raises a FPU stack underflow exception.
7587 *
7588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7589 * @param iStReg The destination register that should be loaded
7590 * with QNaN if \#IS is masked. Specify
7591 * UINT8_MAX if none (like for fcom).
7592 */
7593DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg)
7594{
7595 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7596 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7597 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7598}
7599
7600
7601DECL_NO_INLINE(IEM_STATIC, void)
7602iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7603{
7604 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7605 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7606 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7607 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7608}
7609
7610
7611DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg)
7612{
7613 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7614 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7615 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7616 iemFpuMaybePopOne(pFpuCtx);
7617}
7618
7619
7620DECL_NO_INLINE(IEM_STATIC, void)
7621iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7622{
7623 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7624 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7625 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7626 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7627 iemFpuMaybePopOne(pFpuCtx);
7628}
7629
7630
7631DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu)
7632{
7633 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7634 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7635 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7636 iemFpuMaybePopOne(pFpuCtx);
7637 iemFpuMaybePopOne(pFpuCtx);
7638}
7639
7640
7641DECL_NO_INLINE(IEM_STATIC, void)
7642iemFpuStackPushUnderflow(PVMCPUCC pVCpu)
7643{
7644 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7645 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7646
7647 if (pFpuCtx->FCW & X86_FCW_IM)
7648 {
7649 /* Masked underflow - Push QNaN. */
7650 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7651 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7652 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7653 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7654 pFpuCtx->FTW |= RT_BIT(iNewTop);
7655 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7656 iemFpuRotateStackPush(pFpuCtx);
7657 }
7658 else
7659 {
7660 /* Exception pending - don't change TOP or the register stack. */
7661 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7662 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7663 }
7664}
7665
7666
7667DECL_NO_INLINE(IEM_STATIC, void)
7668iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu)
7669{
7670 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7671 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7672
7673 if (pFpuCtx->FCW & X86_FCW_IM)
7674 {
7675 /* Masked underflow - Push QNaN. */
7676 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7677 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7678 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7679 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7680 pFpuCtx->FTW |= RT_BIT(iNewTop);
7681 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7682 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7683 iemFpuRotateStackPush(pFpuCtx);
7684 }
7685 else
7686 {
7687 /* Exception pending - don't change TOP or the register stack. */
7688 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7689 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7690 }
7691}
7692
7693
7694/**
7695 * Worker routine for raising an FPU stack overflow exception on a push.
7696 *
7697 * @param pFpuCtx The FPU context.
7698 */
7699IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7700{
7701 if (pFpuCtx->FCW & X86_FCW_IM)
7702 {
7703 /* Masked overflow. */
7704 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7705 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7706 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7707 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7708 pFpuCtx->FTW |= RT_BIT(iNewTop);
7709 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7710 iemFpuRotateStackPush(pFpuCtx);
7711 }
7712 else
7713 {
7714 /* Exception pending - don't change TOP or the register stack. */
7715 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7716 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7717 }
7718}
7719
7720
7721/**
7722 * Raises an FPU stack overflow exception on a push.
7723 *
7724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7725 */
7726DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPUCC pVCpu)
7727{
7728 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7729 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7730 iemFpuStackPushOverflowOnly(pFpuCtx);
7731}
7732
7733
7734/**
7735 * Raises an FPU stack overflow exception on a push with a memory operand.
7736 *
7737 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7738 * @param iEffSeg The effective memory operand selector register.
7739 * @param GCPtrEff The effective memory operand offset.
7740 */
7741DECL_NO_INLINE(IEM_STATIC, void)
7742iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7743{
7744 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7745 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7746 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7747 iemFpuStackPushOverflowOnly(pFpuCtx);
7748}
7749
7750
7751IEM_STATIC int iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg)
7752{
7753 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7754 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7755 if (pFpuCtx->FTW & RT_BIT(iReg))
7756 return VINF_SUCCESS;
7757 return VERR_NOT_FOUND;
7758}
7759
7760
7761IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7762{
7763 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7764 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7765 if (pFpuCtx->FTW & RT_BIT(iReg))
7766 {
7767 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7768 return VINF_SUCCESS;
7769 }
7770 return VERR_NOT_FOUND;
7771}
7772
7773
7774IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7775 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7776{
7777 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7778 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7779 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7780 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7781 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7782 {
7783 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7784 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7785 return VINF_SUCCESS;
7786 }
7787 return VERR_NOT_FOUND;
7788}
7789
7790
7791IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7792{
7793 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7794 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7795 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7796 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7797 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7798 {
7799 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7800 return VINF_SUCCESS;
7801 }
7802 return VERR_NOT_FOUND;
7803}
7804
7805
7806/**
7807 * Updates the FPU exception status after FCW is changed.
7808 *
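 * For example, if FSW.IE is set while FCW.IM is clear (the invalid operation
 * exception is unmasked), FSW.ES and FSW.B become set; once every pending
 * exception is masked again, both bits are cleared.
 *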
7809 * @param pFpuCtx The FPU context.
7810 */
7811IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7812{
7813 uint16_t u16Fsw = pFpuCtx->FSW;
7814 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7815 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7816 else
7817 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7818 pFpuCtx->FSW = u16Fsw;
7819}
7820
7821
7822/**
7823 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7824 *
7825 * @returns The full FTW.
7826 * @param pFpuCtx The FPU context.
7827 */
7828IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7829{
7830 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7831 uint16_t u16Ftw = 0;
7832 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
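 /* The full FTW uses two bits per register: 00=valid, 01=zero, 10=special, 11=empty.
    The abridged FTW kept in the FXSAVE image only records empty (0) vs. in use (1). */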
7833 for (unsigned iSt = 0; iSt < 8; iSt++)
7834 {
7835 unsigned const iReg = (iSt + iTop) & 7;
7836 if (!(u8Ftw & RT_BIT(iReg)))
7837 u16Ftw |= 3 << (iReg * 2); /* empty */
7838 else
7839 {
7840 uint16_t uTag;
7841 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7842 if (pr80Reg->s.uExponent == 0x7fff)
7843 uTag = 2; /* Exponent is all 1's => Special. */
7844 else if (pr80Reg->s.uExponent == 0x0000)
7845 {
7846 if (pr80Reg->s.u64Mantissa == 0x0000)
7847 uTag = 1; /* All bits are zero => Zero. */
7848 else
7849 uTag = 2; /* Must be special. */
7850 }
7851 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7852 uTag = 0; /* Valid. */
7853 else
7854 uTag = 2; /* Must be special. */
7855
7856 u16Ftw |= uTag << (iReg * 2);
7857 }
7858 }
7859
7860 return u16Ftw;
7861}
7862
7863
7864/**
7865 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7866 *
7867 * @returns The compressed FTW.
7868 * @param u16FullFtw The full FTW to convert.
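 *
 * For instance, a full FTW of 0x7fff (register 7 tagged as zero, all others
 * empty) compresses to 0x80 (only bit 7 set).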
7869 */
7870IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7871{
7872 uint8_t u8Ftw = 0;
7873 for (unsigned i = 0; i < 8; i++)
7874 {
7875 if ((u16FullFtw & 3) != 3 /*empty*/)
7876 u8Ftw |= RT_BIT(i);
7877 u16FullFtw >>= 2;
7878 }
7879
7880 return u8Ftw;
7881}
7882
7883/** @} */
7884
7885
7886/** @name Memory access.
7887 *
7888 * @{
7889 */
7890
7891
7892/**
7893 * Updates the IEMCPU::cbWritten counter if applicable.
7894 *
7895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7896 * @param fAccess The access being accounted for.
7897 * @param cbMem The access size.
7898 */
7899DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
7900{
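 /* Only plain data and stack writes are counted; system structure writes (GDT, IDT, TSS, ++) are not. */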
7901 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7902 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7903 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7904}
7905
7906
7907/**
7908 * Checks if the given segment can be written to, raising the appropriate
7909 * exception if not.
7910 *
7911 * @returns VBox strict status code.
7912 *
7913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7914 * @param pHid Pointer to the hidden register.
7915 * @param iSegReg The register number.
7916 * @param pu64BaseAddr Where to return the base address to use for the
7917 * segment. (In 64-bit code it may differ from the
7918 * base in the hidden segment.)
7919 */
7920IEM_STATIC VBOXSTRICTRC
7921iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7922{
7923 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7924
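 /* In 64-bit mode only FS and GS contribute a base; the other segments are treated as flat and not checked here. */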
7925 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7926 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7927 else
7928 {
7929 if (!pHid->Attr.n.u1Present)
7930 {
7931 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7932 AssertRelease(uSel == 0);
7933 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7934 return iemRaiseGeneralProtectionFault0(pVCpu);
7935 }
7936
7937 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7938 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7939 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7940 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7941 *pu64BaseAddr = pHid->u64Base;
7942 }
7943 return VINF_SUCCESS;
7944}
7945
7946
7947/**
7948 * Checks if the given segment can be read from, raising the appropriate
7949 * exception if not.
7950 *
7951 * @returns VBox strict status code.
7952 *
7953 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7954 * @param pHid Pointer to the hidden register.
7955 * @param iSegReg The register number.
7956 * @param pu64BaseAddr Where to return the base address to use for the
7957 * segment. (In 64-bit code it may differ from the
7958 * base in the hidden segment.)
7959 */
7960IEM_STATIC VBOXSTRICTRC
7961iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7962{
7963 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7964
7965 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7966 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7967 else
7968 {
7969 if (!pHid->Attr.n.u1Present)
7970 {
7971 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7972 AssertRelease(uSel == 0);
7973 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7974 return iemRaiseGeneralProtectionFault0(pVCpu);
7975 }
7976
7977 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7978 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7979 *pu64BaseAddr = pHid->u64Base;
7980 }
7981 return VINF_SUCCESS;
7982}
7983
7984
7985/**
7986 * Applies the segment limit, base and attributes.
7987 *
7988 * This may raise a \#GP or \#SS.
7989 *
7990 * @returns VBox strict status code.
7991 *
7992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7993 * @param fAccess The kind of access which is being performed.
7994 * @param iSegReg The index of the segment register to apply.
7995 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7996 * TSS, ++).
7997 * @param cbMem The access size.
7998 * @param pGCPtrMem Pointer to the guest memory address to apply
7999 * segmentation to. Input and output parameter.
8000 */
8001IEM_STATIC VBOXSTRICTRC
8002iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8003{
8004 if (iSegReg == UINT8_MAX)
8005 return VINF_SUCCESS;
8006
8007 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8008 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8009 switch (pVCpu->iem.s.enmCpuMode)
8010 {
8011 case IEMMODE_16BIT:
8012 case IEMMODE_32BIT:
8013 {
8014 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8015 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8016
8017 if ( pSel->Attr.n.u1Present
8018 && !pSel->Attr.n.u1Unusable)
8019 {
8020 Assert(pSel->Attr.n.u1DescType);
8021 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8022 {
8023 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8024 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8025 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8026
8027 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8028 {
8029 /** @todo CPL check. */
8030 }
8031
8032 /*
8033 * There are two kinds of data selectors, normal and expand down.
8034 */
8035 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8036 {
8037 if ( GCPtrFirst32 > pSel->u32Limit
8038 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8039 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8040 }
8041 else
8042 {
8043 /*
8044 * The upper boundary is defined by the B bit, not the G bit!
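 * The valid offsets of an expand-down segment are limit+1 up to 0xffff, or up to 0xffffffff when B=1.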
8045 */
8046 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8047 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8048 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8049 }
8050 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8051 }
8052 else
8053 {
8054
8055 /*
8056 * A code selector can usually be used to read through; writing is
8057 * only permitted in real and V8086 mode.
8058 */
8059 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8060 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8061 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8062 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8063 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8064
8065 if ( GCPtrFirst32 > pSel->u32Limit
8066 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8067 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8068
8069 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8070 {
8071 /** @todo CPL check. */
8072 }
8073
8074 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8075 }
8076 }
8077 else
8078 return iemRaiseGeneralProtectionFault0(pVCpu);
8079 return VINF_SUCCESS;
8080 }
8081
8082 case IEMMODE_64BIT:
8083 {
8084 RTGCPTR GCPtrMem = *pGCPtrMem;
8085 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8086 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8087
8088 Assert(cbMem >= 1);
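 /* Both ends of the access must be canonical, i.e. bits 63:47 all equal (sign extension of bit 47). */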
8089 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8090 return VINF_SUCCESS;
8091 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8092 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8093 return iemRaiseGeneralProtectionFault0(pVCpu);
8094 }
8095
8096 default:
8097 AssertFailedReturn(VERR_IEM_IPE_7);
8098 }
8099}
8100
8101
8102/**
8103 * Translates a virtual address to a physical address and checks if we
8104 * can access the page as specified.
8105 *
8106 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8107 * @param GCPtrMem The virtual address.
8108 * @param fAccess The intended access.
8109 * @param pGCPhysMem Where to return the physical address.
8110 */
8111IEM_STATIC VBOXSTRICTRC
8112iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8113{
8114 /** @todo Need a different PGM interface here. We're currently using
8115 * generic / REM interfaces. this won't cut it for R0. */
8116 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8117 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8118 RTGCPHYS GCPhys;
8119 uint64_t fFlags;
8120 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8121 if (RT_FAILURE(rc))
8122 {
8123 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8124 /** @todo Check unassigned memory in unpaged mode. */
8125 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8126 *pGCPhysMem = NIL_RTGCPHYS;
8127 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8128 }
8129
8130 /* If the page is writable and does not have the no-exec bit set, all
8131 access is allowed. Otherwise we'll have to check more carefully... */
8132 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8133 {
8134 /* Write to read only memory? */
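 /* With CR0.WP clear, supervisor-style accesses (CPL < 3 or IEM_ACCESS_WHAT_SYS) may write read-only
    pages; ordinary CPL 3 accesses never may. */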
8135 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8136 && !(fFlags & X86_PTE_RW)
8137 && ( (pVCpu->iem.s.uCpl == 3
8138 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8139 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8140 {
8141 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8142 *pGCPhysMem = NIL_RTGCPHYS;
8143 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8144 }
8145
8146 /* Kernel memory accessed by userland? */
8147 if ( !(fFlags & X86_PTE_US)
8148 && pVCpu->iem.s.uCpl == 3
8149 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8150 {
8151 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8152 *pGCPhysMem = NIL_RTGCPHYS;
8153 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8154 }
8155
8156 /* Executing non-executable memory? */
8157 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8158 && (fFlags & X86_PTE_PAE_NX)
8159 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8160 {
8161 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8162 *pGCPhysMem = NIL_RTGCPHYS;
8163 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8164 VERR_ACCESS_DENIED);
8165 }
8166 }
8167
8168 /*
8169 * Set the dirty / access flags.
8170 * ASSUMES this is set when the address is translated rather than on commit...
8171 */
8172 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8173 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8174 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8175 {
8176 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8177 AssertRC(rc2);
8178 }
8179
8180 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8181 *pGCPhysMem = GCPhys;
8182 return VINF_SUCCESS;
8183}
8184
8185
8186
8187/**
8188 * Maps a physical page.
8189 *
8190 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8192 * @param GCPhysMem The physical address.
8193 * @param fAccess The intended access.
8194 * @param ppvMem Where to return the mapping address.
8195 * @param pLock The PGM lock.
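 *
 * @remarks Failure here is not fatal to the instruction; iemMemMap falls back
 *          on bounce buffering via iemMemBounceBufferMapPhys.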
8196 */
8197IEM_STATIC int iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8198{
8199#ifdef IEM_LOG_MEMORY_WRITES
8200 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8201 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8202#endif
8203
8204 /** @todo This API may require some improving later. A private deal with PGM
8205 * regarding locking and unlocking needs to be struck. A couple of TLBs
8206 * living in PGM, but with publicly accessible inlined access methods
8207 * could perhaps be an even better solution. */
8208 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8209 GCPhysMem,
8210 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8211 pVCpu->iem.s.fBypassHandlers,
8212 ppvMem,
8213 pLock);
8214 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8215 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8216
8217 return rc;
8218}
8219
8220
8221/**
8222 * Unmap a page previously mapped by iemMemPageMap.
8223 *
8224 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8225 * @param GCPhysMem The physical address.
8226 * @param fAccess The intended access.
8227 * @param pvMem What iemMemPageMap returned.
8228 * @param pLock The PGM lock.
8229 */
8230DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8231{
8232 NOREF(pVCpu);
8233 NOREF(GCPhysMem);
8234 NOREF(fAccess);
8235 NOREF(pvMem);
8236 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8237}
8238
8239
8240/**
8241 * Looks up a memory mapping entry.
8242 *
8243 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8244 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8245 * @param pvMem The memory address.
8246 * @param fAccess The kind of access to look up.
8247 */
8248DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8249{
8250 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8251 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
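 /* Only three mapping slots exist, so an unrolled linear scan is sufficient. */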
8252 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8253 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8254 return 0;
8255 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8256 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8257 return 1;
8258 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8259 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8260 return 2;
8261 return VERR_NOT_FOUND;
8262}
8263
8264
8265/**
8266 * Finds a free memmap entry when using iNextMapping doesn't work.
8267 *
8268 * @returns Memory mapping index, 1024 on failure.
8269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8270 */
8271IEM_STATIC unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
8272{
8273 /*
8274 * The easy case.
8275 */
8276 if (pVCpu->iem.s.cActiveMappings == 0)
8277 {
8278 pVCpu->iem.s.iNextMapping = 1;
8279 return 0;
8280 }
8281
8282 /* There should be enough mappings for all instructions. */
8283 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8284
8285 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8286 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8287 return i;
8288
8289 AssertFailedReturn(1024);
8290}
8291
8292
8293/**
8294 * Commits a bounce buffer that needs writing back and unmaps it.
8295 *
8296 * @returns Strict VBox status code.
8297 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8298 * @param iMemMap The index of the buffer to commit.
8299 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8300 * Always false in ring-3, obviously.
8301 */
8302IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
8303{
8304 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8305 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8306#ifdef IN_RING3
8307 Assert(!fPostponeFail);
8308 RT_NOREF_PV(fPostponeFail);
8309#endif
8310
8311 /*
8312 * Do the writing.
8313 */
8314 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8315 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8316 {
8317 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8318 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8319 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
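 /* A bounce buffered access may straddle two (physically discontiguous) pages, hence the first/second split. */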
8320 if (!pVCpu->iem.s.fBypassHandlers)
8321 {
8322 /*
8323 * Carefully and efficiently dealing with access handler return
8324 * codes make this a little bloated.
8325 * codes makes this a little bloated.
8326 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8327 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8328 pbBuf,
8329 cbFirst,
8330 PGMACCESSORIGIN_IEM);
8331 if (rcStrict == VINF_SUCCESS)
8332 {
8333 if (cbSecond)
8334 {
8335 rcStrict = PGMPhysWrite(pVM,
8336 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8337 pbBuf + cbFirst,
8338 cbSecond,
8339 PGMACCESSORIGIN_IEM);
8340 if (rcStrict == VINF_SUCCESS)
8341 { /* nothing */ }
8342 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8343 {
8344 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8345 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8346 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8347 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8348 }
8349#ifndef IN_RING3
8350 else if (fPostponeFail)
8351 {
8352 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8353 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8354 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8355 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8356 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8357 return iemSetPassUpStatus(pVCpu, rcStrict);
8358 }
8359#endif
8360 else
8361 {
8362 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8363 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8364 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8365 return rcStrict;
8366 }
8367 }
8368 }
8369 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8370 {
8371 if (!cbSecond)
8372 {
8373 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8374 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8375 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8376 }
8377 else
8378 {
8379 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8380 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8381 pbBuf + cbFirst,
8382 cbSecond,
8383 PGMACCESSORIGIN_IEM);
8384 if (rcStrict2 == VINF_SUCCESS)
8385 {
8386 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8387 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8388 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8389 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8390 }
8391 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8392 {
8393 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8394 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8395 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8396 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8397 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8398 }
8399#ifndef IN_RING3
8400 else if (fPostponeFail)
8401 {
8402 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8403 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8404 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8405 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8406 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8407 return iemSetPassUpStatus(pVCpu, rcStrict);
8408 }
8409#endif
8410 else
8411 {
8412 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8413 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8414 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8415 return rcStrict2;
8416 }
8417 }
8418 }
8419#ifndef IN_RING3
8420 else if (fPostponeFail)
8421 {
8422 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8423 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8424 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8425 if (!cbSecond)
8426 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8427 else
8428 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8429 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8430 return iemSetPassUpStatus(pVCpu, rcStrict);
8431 }
8432#endif
8433 else
8434 {
8435 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8436 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8437 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8438 return rcStrict;
8439 }
8440 }
8441 else
8442 {
8443 /*
8444 * No access handlers, much simpler.
8445 */
8446 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8447 if (RT_SUCCESS(rc))
8448 {
8449 if (cbSecond)
8450 {
8451 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8452 if (RT_SUCCESS(rc))
8453 { /* likely */ }
8454 else
8455 {
8456 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8457 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8458 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8459 return rc;
8460 }
8461 }
8462 }
8463 else
8464 {
8465 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8466 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8467 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8468 return rc;
8469 }
8470 }
8471 }
8472
8473#if defined(IEM_LOG_MEMORY_WRITES)
8474 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8475 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8476 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8477 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8478 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8479 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8480
8481 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8482 g_cbIemWrote = cbWrote;
8483 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8484#endif
8485
8486 /*
8487 * Free the mapping entry.
8488 */
8489 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8490 Assert(pVCpu->iem.s.cActiveMappings != 0);
8491 pVCpu->iem.s.cActiveMappings--;
8492 return VINF_SUCCESS;
8493}
8494
8495
8496/**
8497 * iemMemMap worker that deals with a request crossing pages.
8498 */
8499IEM_STATIC VBOXSTRICTRC
8500iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8501{
8502 /*
8503 * Do the address translations.
8504 */
8505 RTGCPHYS GCPhysFirst;
8506 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8507 if (rcStrict != VINF_SUCCESS)
8508 return rcStrict;
8509
8510 RTGCPHYS GCPhysSecond;
8511 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8512 fAccess, &GCPhysSecond);
8513 if (rcStrict != VINF_SUCCESS)
8514 return rcStrict;
8515 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8516
8517 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8518
8519 /*
8520 * Read in the current memory content if it's a read, execute or partial
8521 * write access.
8522 */
8523 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8524 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8525 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
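 /* The bounce buffer holds the tail of the first page followed by the head of the second page. */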
8526
8527 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8528 {
8529 if (!pVCpu->iem.s.fBypassHandlers)
8530 {
8531 /*
8532 * Must carefully deal with access handler status codes here,
8533 * makes the code a bit bloated.
8534 */
8535 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8536 if (rcStrict == VINF_SUCCESS)
8537 {
8538 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8539 if (rcStrict == VINF_SUCCESS)
8540 { /*likely */ }
8541 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8542 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8543 else
8544 {
8545 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8546 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8547 return rcStrict;
8548 }
8549 }
8550 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8551 {
8552 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8553 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8554 {
8555 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8556 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8557 }
8558 else
8559 {
8560 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8561 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8562 return rcStrict2;
8563 }
8564 }
8565 else
8566 {
8567 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8568 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8569 return rcStrict;
8570 }
8571 }
8572 else
8573 {
8574 /*
8575 * No informational status codes here, much more straightforward.
8576 */
8577 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8578 if (RT_SUCCESS(rc))
8579 {
8580 Assert(rc == VINF_SUCCESS);
8581 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8582 if (RT_SUCCESS(rc))
8583 Assert(rc == VINF_SUCCESS);
8584 else
8585 {
8586 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8587 return rc;
8588 }
8589 }
8590 else
8591 {
8592 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8593 return rc;
8594 }
8595 }
8596 }
8597#ifdef VBOX_STRICT
8598 else
8599 memset(pbBuf, 0xcc, cbMem);
8600 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8601 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8602#endif
8603
8604 /*
8605 * Commit the bounce buffer entry.
8606 */
8607 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8608 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8609 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8610 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8611 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8612 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8613 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8614 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8615 pVCpu->iem.s.cActiveMappings++;
8616
8617 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8618 *ppvMem = pbBuf;
8619 return VINF_SUCCESS;
8620}
8621
8622
8623/**
8624 * iemMemMap worker that deals with iemMemPageMap failures.
8625 */
8626IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8627 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8628{
8629 /*
8630 * Filter out conditions we can handle and the ones which shouldn't happen.
8631 */
8632 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8633 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8634 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8635 {
8636 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8637 return rcMap;
8638 }
8639 pVCpu->iem.s.cPotentialExits++;
8640
8641 /*
8642 * Read in the current memory content if it's a read, execute or partial
8643 * write access.
8644 */
8645 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8646 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8647 {
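 /* Unassigned physical memory reads back as all ones (0xff), the conventional open-bus value. */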
8648 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8649 memset(pbBuf, 0xff, cbMem);
8650 else
8651 {
8652 int rc;
8653 if (!pVCpu->iem.s.fBypassHandlers)
8654 {
8655 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8656 if (rcStrict == VINF_SUCCESS)
8657 { /* nothing */ }
8658 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8659 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8660 else
8661 {
8662 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8663 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8664 return rcStrict;
8665 }
8666 }
8667 else
8668 {
8669 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8670 if (RT_SUCCESS(rc))
8671 { /* likely */ }
8672 else
8673 {
8674 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8675 GCPhysFirst, rc));
8676 return rc;
8677 }
8678 }
8679 }
8680 }
8681#ifdef VBOX_STRICT
8682 else
8683 memset(pbBuf, 0xcc, cbMem);
8684#endif
8685#ifdef VBOX_STRICT
8686 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8687 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8688#endif
8689
8690 /*
8691 * Commit the bounce buffer entry.
8692 */
8693 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8694 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8695 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8696 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8697 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8698 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8699 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8700 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8701 pVCpu->iem.s.cActiveMappings++;
8702
8703 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8704 *ppvMem = pbBuf;
8705 return VINF_SUCCESS;
8706}
8707
8708
8709
8710/**
8711 * Maps the specified guest memory for the given kind of access.
8712 *
8713 * This may be using bounce buffering of the memory if it's crossing a page
8714 * boundary or if there is an access handler installed for any of it. Because
8715 * of lock prefix guarantees, we're in for some extra clutter when this
8716 * happens.
8717 *
8718 * This may raise a \#GP, \#SS, \#PF or \#AC.
8719 *
8720 * @returns VBox strict status code.
8721 *
8722 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8723 * @param ppvMem Where to return the pointer to the mapped
8724 * memory.
8725 * @param cbMem The number of bytes to map. This is usually 1,
8726 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8727 * string operations it can be up to a page.
8728 * @param iSegReg The index of the segment register to use for
8729 * this access. The base and limits are checked.
8730 * Use UINT8_MAX to indicate that no segmentation
8731 * is required (for IDT, GDT and LDT accesses).
8732 * @param GCPtrMem The address of the guest memory.
8733 * @param fAccess How the memory is being accessed. The
8734 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8735 * how to map the memory, while the
8736 * IEM_ACCESS_WHAT_XXX bit is used when raising
8737 * exceptions.
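 *
 * A minimal usage sketch (hypothetical local names; see iemMemFetchDataU32
 * below for the real thing):
 * @code
 *      uint32_t        u32Value = 0;
 *      uint32_t const *pu32Src;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src),
 *                                        X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_R);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          u32Value = *pu32Src;
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
 *      }
 * @endcode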
8738 */
8739IEM_STATIC VBOXSTRICTRC
8740iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8741{
8742 /*
8743 * Check the input and figure out which mapping entry to use.
8744 */
8745 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8746 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8747 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8748
8749 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8750 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8751 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8752 {
8753 iMemMap = iemMemMapFindFree(pVCpu);
8754 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8755 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8756 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8757 pVCpu->iem.s.aMemMappings[2].fAccess),
8758 VERR_IEM_IPE_9);
8759 }
8760
8761 /*
8762 * Map the memory, checking that we can actually access it. If something
8763 * slightly complicated happens, fall back on bounce buffering.
8764 */
8765 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8766 if (rcStrict != VINF_SUCCESS)
8767 return rcStrict;
8768
8769 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8770 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8771
8772 RTGCPHYS GCPhysFirst;
8773 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8774 if (rcStrict != VINF_SUCCESS)
8775 return rcStrict;
8776
8777 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8778 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8779 if (fAccess & IEM_ACCESS_TYPE_READ)
8780 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8781
8782 void *pvMem;
8783 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8784 if (rcStrict != VINF_SUCCESS)
8785 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8786
8787 /*
8788 * Fill in the mapping table entry.
8789 */
8790 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8791 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8792 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8793 pVCpu->iem.s.cActiveMappings++;
8794
8795 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8796 *ppvMem = pvMem;
8797
8798 return VINF_SUCCESS;
8799}
8800
8801
8802/**
8803 * Commits the guest memory if bounce buffered and unmaps it.
8804 *
8805 * @returns Strict VBox status code.
8806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8807 * @param pvMem The mapping.
8808 * @param fAccess The kind of access.
8809 */
8810IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8811{
8812 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8813 AssertReturn(iMemMap >= 0, iMemMap);
8814
8815 /* If it's bounce buffered, we may need to write back the buffer. */
8816 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8817 {
8818 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8819 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8820 }
8821 /* Otherwise unlock it. */
8822 else
8823 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8824
8825 /* Free the entry. */
8826 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8827 Assert(pVCpu->iem.s.cActiveMappings != 0);
8828 pVCpu->iem.s.cActiveMappings--;
8829 return VINF_SUCCESS;
8830}
8831
8832#ifdef IEM_WITH_SETJMP
8833
8834/**
8835 * Maps the specified guest memory for the given kind of access, longjmp on
8836 * error.
8837 *
8838 * This may be using bounce buffering of the memory if it's crossing a page
8839 * boundary or if there is an access handler installed for any of it. Because
8840 * of lock prefix guarantees, we're in for some extra clutter when this
8841 * happens.
8842 *
8843 * This may raise a \#GP, \#SS, \#PF or \#AC.
8844 *
8845 * @returns Pointer to the mapped memory.
8846 *
8847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8848 * @param cbMem The number of bytes to map. This is usually 1,
8849 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8850 * string operations it can be up to a page.
8851 * @param iSegReg The index of the segment register to use for
8852 * this access. The base and limits are checked.
8853 * Use UINT8_MAX to indicate that no segmentation
8854 * is required (for IDT, GDT and LDT accesses).
8855 * @param GCPtrMem The address of the guest memory.
8856 * @param fAccess How the memory is being accessed. The
8857 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8858 * how to map the memory, while the
8859 * IEM_ACCESS_WHAT_XXX bit is used when raising
8860 * exceptions.
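 *
 * A minimal usage sketch (hypothetical local names), mirroring the
 * iemMemFetchDataXxxJmp helpers below:
 * @code
 *      uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), X86_SREG_DS,
 *                                                               GCPtrMem, IEM_ACCESS_DATA_R);
 *      uint32_t const  u32Value = *pu32Src;
 *      iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
 * @endcode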
8861 */
8862IEM_STATIC void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8863{
8864 /*
8865 * Check the input and figure out which mapping entry to use.
8866 */
8867 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8868 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8869 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8870
8871 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8872 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8873 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8874 {
8875 iMemMap = iemMemMapFindFree(pVCpu);
8876 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8877 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8878 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8879 pVCpu->iem.s.aMemMappings[2].fAccess),
8880 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8881 }
8882
8883 /*
8884 * Map the memory, checking that we can actually access it. If something
8885 * slightly complicated happens, fall back on bounce buffering.
8886 */
8887 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8888 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8889 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8890
8891 /* Crossing a page boundary? */
8892 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8893 { /* No (likely). */ }
8894 else
8895 {
8896 void *pvMem;
8897 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8898 if (rcStrict == VINF_SUCCESS)
8899 return pvMem;
8900 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8901 }
8902
8903 RTGCPHYS GCPhysFirst;
8904 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8905 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8906 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8907
8908 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8909 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8910 if (fAccess & IEM_ACCESS_TYPE_READ)
8911 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8912
8913 void *pvMem;
8914 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8915 if (rcStrict == VINF_SUCCESS)
8916 { /* likely */ }
8917 else
8918 {
8919 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8920 if (rcStrict == VINF_SUCCESS)
8921 return pvMem;
8922 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8923 }
8924
8925 /*
8926 * Fill in the mapping table entry.
8927 */
8928 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8929 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8930 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8931 pVCpu->iem.s.cActiveMappings++;
8932
8933 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8934 return pvMem;
8935}
8936
8937
8938/**
8939 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8940 *
8941 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8942 * @param pvMem The mapping.
8943 * @param fAccess The kind of access.
8944 */
8945IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8946{
8947 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8948 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8949
8950 /* If it's bounce buffered, we may need to write back the buffer. */
8951 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8952 {
8953 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8954 {
8955 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8956 if (rcStrict == VINF_SUCCESS)
8957 return;
8958 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8959 }
8960 }
8961 /* Otherwise unlock it. */
8962 else
8963 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8964
8965 /* Free the entry. */
8966 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8967 Assert(pVCpu->iem.s.cActiveMappings != 0);
8968 pVCpu->iem.s.cActiveMappings--;
8969}
8970
8971#endif /* IEM_WITH_SETJMP */
8972
8973#ifndef IN_RING3
8974/**
8975 * Commits the guest memory if bounce buffered and unmaps it. If any bounce
8976 * buffer part shows trouble, the write is postponed to ring-3 (sets VMCPU_FF_IEM).
8977 *
8978 * Allows the instruction to be completed and retired, while the IEM user will
8979 * return to ring-3 immediately afterwards and do the postponed writes there.
8980 *
8981 * @returns VBox status code (no strict statuses). Caller must check
8982 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8984 * @param pvMem The mapping.
8985 * @param fAccess The kind of access.
8986 */
8987IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8988{
8989 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8990 AssertReturn(iMemMap >= 0, iMemMap);
8991
8992 /* If it's bounce buffered, we may need to write back the buffer. */
8993 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8994 {
8995 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8996 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8997 }
8998 /* Otherwise unlock it. */
8999 else
9000 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9001
9002 /* Free the entry. */
9003 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9004 Assert(pVCpu->iem.s.cActiveMappings != 0);
9005 pVCpu->iem.s.cActiveMappings--;
9006 return VINF_SUCCESS;
9007}
9008#endif
9009
9010
9011/**
9012 * Rolls back mappings, releasing page locks and such.
9013 *
9014 * The caller shall only call this after checking cActiveMappings.
9015 *
9017 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9018 */
9019IEM_STATIC void iemMemRollback(PVMCPUCC pVCpu)
9020{
9021 Assert(pVCpu->iem.s.cActiveMappings > 0);
9022
9023 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9024 while (iMemMap-- > 0)
9025 {
9026 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9027 if (fAccess != IEM_ACCESS_INVALID)
9028 {
9029 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9030 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9031 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9032 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9033 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9034 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9035 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9036 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9037 pVCpu->iem.s.cActiveMappings--;
9038 }
9039 }
9040}
9041
9042
9043/**
9044 * Fetches a data byte.
9045 *
9046 * @returns Strict VBox status code.
9047 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9048 * @param pu8Dst Where to return the byte.
9049 * @param iSegReg The index of the segment register to use for
9050 * this access. The base and limits are checked.
9051 * @param GCPtrMem The address of the guest memory.
9052 */
9053IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9054{
9055 /* The lazy approach for now... */
9056 uint8_t const *pu8Src;
9057 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9058 if (rc == VINF_SUCCESS)
9059 {
9060 *pu8Dst = *pu8Src;
9061 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9062 }
9063 return rc;
9064}
9065
9066
9067#ifdef IEM_WITH_SETJMP
9068/**
9069 * Fetches a data byte, longjmp on error.
9070 *
9071 * @returns The byte.
9072 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9073 * @param iSegReg The index of the segment register to use for
9074 * this access. The base and limits are checked.
9075 * @param GCPtrMem The address of the guest memory.
9076 */
9077DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9078{
9079 /* The lazy approach for now... */
9080 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9081 uint8_t const bRet = *pu8Src;
9082 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9083 return bRet;
9084}
9085#endif /* IEM_WITH_SETJMP */
9086
9087
9088/**
9089 * Fetches a data word.
9090 *
9091 * @returns Strict VBox status code.
9092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9093 * @param pu16Dst Where to return the word.
9094 * @param iSegReg The index of the segment register to use for
9095 * this access. The base and limits are checked.
9096 * @param GCPtrMem The address of the guest memory.
9097 */
9098IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9099{
9100 /* The lazy approach for now... */
9101 uint16_t const *pu16Src;
9102 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9103 if (rc == VINF_SUCCESS)
9104 {
9105 *pu16Dst = *pu16Src;
9106 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9107 }
9108 return rc;
9109}
9110
9111
9112#ifdef IEM_WITH_SETJMP
9113/**
9114 * Fetches a data word, longjmp on error.
9115 *
9116 * @returns The word
9117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9118 * @param iSegReg The index of the segment register to use for
9119 * this access. The base and limits are checked.
9120 * @param GCPtrMem The address of the guest memory.
9121 */
9122DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9123{
9124 /* The lazy approach for now... */
9125 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9126 uint16_t const u16Ret = *pu16Src;
9127 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9128 return u16Ret;
9129}
9130#endif
9131
9132
9133/**
9134 * Fetches a data dword.
9135 *
9136 * @returns Strict VBox status code.
9137 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9138 * @param pu32Dst Where to return the dword.
9139 * @param iSegReg The index of the segment register to use for
9140 * this access. The base and limits are checked.
9141 * @param GCPtrMem The address of the guest memory.
9142 */
9143IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9144{
9145 /* The lazy approach for now... */
9146 uint32_t const *pu32Src;
9147 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9148 if (rc == VINF_SUCCESS)
9149 {
9150 *pu32Dst = *pu32Src;
9151 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9152 }
9153 return rc;
9154}
9155
9156
9157#ifdef IEM_WITH_SETJMP
9158
9159IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9160{
9161 Assert(cbMem >= 1);
9162 Assert(iSegReg < X86_SREG_COUNT);
9163
9164 /*
9165 * 64-bit mode is simpler.
9166 */
9167 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9168 {
9169 if (iSegReg >= X86_SREG_FS)
9170 {
9171 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9172 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9173 GCPtrMem += pSel->u64Base;
9174 }
9175
9176 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9177 return GCPtrMem;
9178 }
9179 /*
9180 * 16-bit and 32-bit segmentation.
9181 */
9182 else
9183 {
9184 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9185 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9186 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9187 == X86DESCATTR_P /* data, expand up */
9188 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9189 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9190 {
9191 /* expand up */
9192 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9193 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9194 && GCPtrLast32 > (uint32_t)GCPtrMem))
9195 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9196 }
9197 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9198 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9199 {
9200 /* expand down */
9201 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9202 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9203 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9204 && GCPtrLast32 > (uint32_t)GCPtrMem))
9205 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9206 }
9207 else
9208 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9209 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9210 }
9211 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9212}
9213
9214
9215IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9216{
9217 Assert(cbMem >= 1);
9218 Assert(iSegReg < X86_SREG_COUNT);
9219
9220 /*
9221 * 64-bit mode is simpler.
9222 */
9223 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9224 {
9225 if (iSegReg >= X86_SREG_FS)
9226 {
9227 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9228 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9229 GCPtrMem += pSel->u64Base;
9230 }
9231
9232 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9233 return GCPtrMem;
9234 }
9235 /*
9236 * 16-bit and 32-bit segmentation.
9237 */
9238 else
9239 {
9240 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9241 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9242 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9243 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9244 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9245 {
9246 /* expand up */
9247 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9248 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9249 && GCPtrLast32 > (uint32_t)GCPtrMem))
9250 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9251 }
9252 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9253 {
9254 /* expand down */
9255 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9256 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9257 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9258 && GCPtrLast32 > (uint32_t)GCPtrMem))
9259 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9260 }
9261 else
9262 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9263 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9264 }
9265 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9266}
9267
9268
9269/**
9270 * Fetches a data dword, longjmp on error, fallback/safe version.
9271 *
9272 * @returns The dword
9273 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9274 * @param iSegReg The index of the segment register to use for
9275 * this access. The base and limits are checked.
9276 * @param GCPtrMem The address of the guest memory.
9277 */
9278IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9279{
9280 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9281 uint32_t const u32Ret = *pu32Src;
9282 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9283 return u32Ret;
9284}
9285
9286
9287/**
9288 * Fetches a data dword, longjmp on error.
9289 *
9290 * @returns The dword
9291 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9292 * @param iSegReg The index of the segment register to use for
9293 * this access. The base and limits are checked.
9294 * @param GCPtrMem The address of the guest memory.
9295 */
9296DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9297{
9298# ifdef IEM_WITH_DATA_TLB
9299 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9300 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9301 {
9302 /// @todo more later.
9303 }
9304
9305 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9306# else
9307 /* The lazy approach. */
9308 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9309 uint32_t const u32Ret = *pu32Src;
9310 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9311 return u32Ret;
9312# endif
9313}
9314#endif
9315
9316
9317#ifdef SOME_UNUSED_FUNCTION
9318/**
9319 * Fetches a data dword and sign extends it to a qword.
9320 *
9321 * @returns Strict VBox status code.
9322 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9323 * @param pu64Dst Where to return the sign extended value.
9324 * @param iSegReg The index of the segment register to use for
9325 * this access. The base and limits are checked.
9326 * @param GCPtrMem The address of the guest memory.
9327 */
9328IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9329{
9330 /* The lazy approach for now... */
9331 int32_t const *pi32Src;
9332 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9333 if (rc == VINF_SUCCESS)
9334 {
9335 *pu64Dst = *pi32Src;
9336 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9337 }
9338#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9339 else
9340 *pu64Dst = 0;
9341#endif
9342 return rc;
9343}
9344#endif
9345
9346
9347/**
9348 * Fetches a data qword.
9349 *
9350 * @returns Strict VBox status code.
9351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9352 * @param pu64Dst Where to return the qword.
9353 * @param iSegReg The index of the segment register to use for
9354 * this access. The base and limits are checked.
9355 * @param GCPtrMem The address of the guest memory.
9356 */
9357IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9358{
9359 /* The lazy approach for now... */
9360 uint64_t const *pu64Src;
9361 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9362 if (rc == VINF_SUCCESS)
9363 {
9364 *pu64Dst = *pu64Src;
9365 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9366 }
9367 return rc;
9368}
9369
9370
9371#ifdef IEM_WITH_SETJMP
9372/**
9373 * Fetches a data qword, longjmp on error.
9374 *
9375 * @returns The qword.
9376 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9377 * @param iSegReg The index of the segment register to use for
9378 * this access. The base and limits are checked.
9379 * @param GCPtrMem The address of the guest memory.
9380 */
9381DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9382{
9383 /* The lazy approach for now... */
9384 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9385 uint64_t const u64Ret = *pu64Src;
9386 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9387 return u64Ret;
9388}
9389#endif
9390
9391
9392/**
9393 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9394 *
9395 * @returns Strict VBox status code.
9396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9397 * @param pu64Dst Where to return the qword.
9398 * @param iSegReg The index of the segment register to use for
9399 * this access. The base and limits are checked.
9400 * @param GCPtrMem The address of the guest memory.
9401 */
9402IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9403{
9404 /* The lazy approach for now... */
9405 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9406 if (RT_UNLIKELY(GCPtrMem & 15))
9407 return iemRaiseGeneralProtectionFault0(pVCpu);
9408
9409 uint64_t const *pu64Src;
9410 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9411 if (rc == VINF_SUCCESS)
9412 {
9413 *pu64Dst = *pu64Src;
9414 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9415 }
9416 return rc;
9417}
9418
9419
9420#ifdef IEM_WITH_SETJMP
9421/**
9422 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9423 *
9424 * @returns The qword.
9425 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9426 * @param iSegReg The index of the segment register to use for
9427 * this access. The base and limits are checked.
9428 * @param GCPtrMem The address of the guest memory.
9429 */
9430DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9431{
9432 /* The lazy approach for now... */
9433 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9434 if (RT_LIKELY(!(GCPtrMem & 15)))
9435 {
9436 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9437 uint64_t const u64Ret = *pu64Src;
9438 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9439 return u64Ret;
9440 }
9441
9442 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9443 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9444}
9445#endif
9446
9447
9448/**
9449 * Fetches a data tword.
9450 *
9451 * @returns Strict VBox status code.
9452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9453 * @param pr80Dst Where to return the tword.
9454 * @param iSegReg The index of the segment register to use for
9455 * this access. The base and limits are checked.
9456 * @param GCPtrMem The address of the guest memory.
9457 */
9458IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9459{
9460 /* The lazy approach for now... */
9461 PCRTFLOAT80U pr80Src;
9462 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9463 if (rc == VINF_SUCCESS)
9464 {
9465 *pr80Dst = *pr80Src;
9466 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9467 }
9468 return rc;
9469}
9470
9471
9472#ifdef IEM_WITH_SETJMP
9473/**
9474 * Fetches a data tword, longjmp on error.
9475 *
9476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9477 * @param pr80Dst Where to return the tword.
9478 * @param iSegReg The index of the segment register to use for
9479 * this access. The base and limits are checked.
9480 * @param GCPtrMem The address of the guest memory.
9481 */
9482DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9483{
9484 /* The lazy approach for now... */
9485 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9486 *pr80Dst = *pr80Src;
9487 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9488}
9489#endif
9490
9491
9492/**
9493 * Fetches a data dqword (double qword), generally SSE related.
9494 *
9495 * @returns Strict VBox status code.
9496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9497 * @param pu128Dst Where to return the dqword.
9498 * @param iSegReg The index of the segment register to use for
9499 * this access. The base and limits are checked.
9500 * @param GCPtrMem The address of the guest memory.
9501 */
9502IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9503{
9504 /* The lazy approach for now... */
9505 PCRTUINT128U pu128Src;
9506 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9507 if (rc == VINF_SUCCESS)
9508 {
9509 pu128Dst->au64[0] = pu128Src->au64[0];
9510 pu128Dst->au64[1] = pu128Src->au64[1];
9511 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9512 }
9513 return rc;
9514}
9515
9516
9517#ifdef IEM_WITH_SETJMP
9518/**
9519 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9520 *
9521 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9522 * @param pu128Dst Where to return the dqword.
9523 * @param iSegReg The index of the segment register to use for
9524 * this access. The base and limits are checked.
9525 * @param GCPtrMem The address of the guest memory.
9526 */
9527IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9528{
9529 /* The lazy approach for now... */
9530 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9531 pu128Dst->au64[0] = pu128Src->au64[0];
9532 pu128Dst->au64[1] = pu128Src->au64[1];
9533 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9534}
9535#endif
9536
9537
9538/**
9539 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9540 * related.
9541 *
9542 * Raises \#GP(0) if not aligned.
9543 *
9544 * @returns Strict VBox status code.
9545 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9546 * @param pu128Dst Where to return the dqword.
9547 * @param iSegReg The index of the segment register to use for
9548 * this access. The base and limits are checked.
9549 * @param GCPtrMem The address of the guest memory.
9550 */
9551IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9552{
9553 /* The lazy approach for now... */
9554 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9555 if ( (GCPtrMem & 15)
9556 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9557 return iemRaiseGeneralProtectionFault0(pVCpu);
9558
9559 PCRTUINT128U pu128Src;
9560 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9561 if (rc == VINF_SUCCESS)
9562 {
9563 pu128Dst->au64[0] = pu128Src->au64[0];
9564 pu128Dst->au64[1] = pu128Src->au64[1];
9565 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9566 }
9567 return rc;
9568}
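
/*
 * Note on the alignment check above, with an illustrative address: a 16-byte
 * SSE load at GCPtrMem=0x1008 would normally raise \#GP(0) here because the
 * address is not 16-byte aligned.  If the guest has set MXCSR.MM (AMD's
 * misaligned SSE mode), the check is skipped and the access behaves like its
 * unaligned counterpart.
 */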
9569
9570
9571#ifdef IEM_WITH_SETJMP
9572/**
9573 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9574 * related, longjmp on error.
9575 *
9576 * Raises \#GP(0) if not aligned.
9577 *
9578 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9579 * @param pu128Dst Where to return the dqword.
9580 * @param iSegReg The index of the segment register to use for
9581 * this access. The base and limits are checked.
9582 * @param GCPtrMem The address of the guest memory.
9583 */
9584DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9585{
9586 /* The lazy approach for now... */
9587 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9588 if ( (GCPtrMem & 15) == 0
9589 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9590 {
9591 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9592 pu128Dst->au64[0] = pu128Src->au64[0];
9593 pu128Dst->au64[1] = pu128Src->au64[1];
9594 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9595 return;
9596 }
9597
9598 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9599 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9600}
9601#endif
9602
9603
9604/**
9605 * Fetches a data oword (octo word), generally AVX related.
9606 *
9607 * @returns Strict VBox status code.
9608 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9609 * @param pu256Dst Where to return the oword.
9610 * @param iSegReg The index of the segment register to use for
9611 * this access. The base and limits are checked.
9612 * @param GCPtrMem The address of the guest memory.
9613 */
9614IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9615{
9616 /* The lazy approach for now... */
9617 PCRTUINT256U pu256Src;
9618 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9619 if (rc == VINF_SUCCESS)
9620 {
9621 pu256Dst->au64[0] = pu256Src->au64[0];
9622 pu256Dst->au64[1] = pu256Src->au64[1];
9623 pu256Dst->au64[2] = pu256Src->au64[2];
9624 pu256Dst->au64[3] = pu256Src->au64[3];
9625 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9626 }
9627 return rc;
9628}
9629
9630
9631#ifdef IEM_WITH_SETJMP
9632/**
9633 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9634 *
9635 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9636 * @param pu256Dst Where to return the oword.
9637 * @param iSegReg The index of the segment register to use for
9638 * this access. The base and limits are checked.
9639 * @param GCPtrMem The address of the guest memory.
9640 */
9641IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9642{
9643 /* The lazy approach for now... */
9644 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9645 pu256Dst->au64[0] = pu256Src->au64[0];
9646 pu256Dst->au64[1] = pu256Src->au64[1];
9647 pu256Dst->au64[2] = pu256Src->au64[2];
9648 pu256Dst->au64[3] = pu256Src->au64[3];
9649 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9650}
9651#endif
9652
9653
9654/**
9655 * Fetches a data oword (octo word) at an aligned address, generally AVX
9656 * related.
9657 *
9658 * Raises \#GP(0) if not aligned.
9659 *
9660 * @returns Strict VBox status code.
9661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9662 * @param pu256Dst Where to return the oword.
9663 * @param iSegReg The index of the segment register to use for
9664 * this access. The base and limits are checked.
9665 * @param GCPtrMem The address of the guest memory.
9666 */
9667IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9668{
9669 /* The lazy approach for now... */
9670 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9671 if (GCPtrMem & 31)
9672 return iemRaiseGeneralProtectionFault0(pVCpu);
9673
9674 PCRTUINT256U pu256Src;
9675 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9676 if (rc == VINF_SUCCESS)
9677 {
9678 pu256Dst->au64[0] = pu256Src->au64[0];
9679 pu256Dst->au64[1] = pu256Src->au64[1];
9680 pu256Dst->au64[2] = pu256Src->au64[2];
9681 pu256Dst->au64[3] = pu256Src->au64[3];
9682 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9683 }
9684 return rc;
9685}
9686
9687
9688#ifdef IEM_WITH_SETJMP
9689/**
9690 * Fetches a data oword (octo word) at an aligned address, generally AVX
9691 * related, longjmp on error.
9692 *
9693 * Raises \#GP(0) if not aligned.
9694 *
9695 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9696 * @param pu256Dst Where to return the oword.
9697 * @param iSegReg The index of the segment register to use for
9698 * this access. The base and limits are checked.
9699 * @param GCPtrMem The address of the guest memory.
9700 */
9701DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9702{
9703 /* The lazy approach for now... */
9704 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9705 if ((GCPtrMem & 31) == 0)
9706 {
9707 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9708 pu256Dst->au64[0] = pu256Src->au64[0];
9709 pu256Dst->au64[1] = pu256Src->au64[1];
9710 pu256Dst->au64[2] = pu256Src->au64[2];
9711 pu256Dst->au64[3] = pu256Src->au64[3];
9712 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9713 return;
9714 }
9715
9716 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9717 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9718}
9719#endif
9720
9721
9722
9723/**
9724 * Fetches a descriptor register (lgdt, lidt).
9725 *
9726 * @returns Strict VBox status code.
9727 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9728 * @param pcbLimit Where to return the limit.
9729 * @param pGCPtrBase Where to return the base.
9730 * @param iSegReg The index of the segment register to use for
9731 * this access. The base and limits are checked.
9732 * @param GCPtrMem The address of the guest memory.
9733 * @param enmOpSize The effective operand size.
9734 */
9735IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9736 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9737{
9738 /*
9739 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9740 * little special:
9741 * - The two reads are done separately.
9742 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
9743 * - We suspect the 386 to actually commit the limit before the base in
9744 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9745 * don't try to emulate this eccentric behavior, because it's not well
9746 * enough understood and rather hard to trigger.
9747 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9748 */
9749 VBOXSTRICTRC rcStrict;
9750 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9751 {
9752 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9753 if (rcStrict == VINF_SUCCESS)
9754 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9755 }
9756 else
9757 {
9758 uint32_t uTmp = 0; /* (Initialized to quiet a Visual C++ maybe-used-uninitialized warning.) */
9759 if (enmOpSize == IEMMODE_32BIT)
9760 {
9761 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9762 {
9763 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9764 if (rcStrict == VINF_SUCCESS)
9765 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9766 }
9767 else
9768 {
9769 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9770 if (rcStrict == VINF_SUCCESS)
9771 {
9772 *pcbLimit = (uint16_t)uTmp;
9773 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9774 }
9775 }
9776 if (rcStrict == VINF_SUCCESS)
9777 *pGCPtrBase = uTmp;
9778 }
9779 else
9780 {
9781 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9782 if (rcStrict == VINF_SUCCESS)
9783 {
9784 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9785 if (rcStrict == VINF_SUCCESS)
9786 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9787 }
9788 }
9789 }
9790 return rcStrict;
9791}
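
/*
 * Purely illustrative sketch of an LGDT-style caller of the helper above.
 * iemExampleLoadGdtr is hypothetical and not part of IEM; it merely shows that
 * the limit and base come back through separate out parameters and that the
 * strict status code must be checked before either value is used.
 *
 *  IEM_STATIC VBOXSTRICTRC iemExampleLoadGdtr(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, IEMMODE enmEffOpSize)
 *  {
 *      uint16_t     cbLimit   = 0;
 *      RTGCPTR      GCPtrBase = 0;
 *      VBOXSTRICTRC rcStrict  = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEff, enmEffOpSize);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          // ...load cbLimit/GCPtrBase into the guest GDTR and advance RIP here...
 *      }
 *      return rcStrict;
 *  }
 */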
9792
9793
9794
9795/**
9796 * Stores a data byte.
9797 *
9798 * @returns Strict VBox status code.
9799 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9800 * @param iSegReg The index of the segment register to use for
9801 * this access. The base and limits are checked.
9802 * @param GCPtrMem The address of the guest memory.
9803 * @param u8Value The value to store.
9804 */
9805IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9806{
9807 /* The lazy approach for now... */
9808 uint8_t *pu8Dst;
9809 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9810 if (rc == VINF_SUCCESS)
9811 {
9812 *pu8Dst = u8Value;
9813 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9814 }
9815 return rc;
9816}
9817
9818
9819#ifdef IEM_WITH_SETJMP
9820/**
9821 * Stores a data byte, longjmp on error.
9822 *
9823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9824 * @param iSegReg The index of the segment register to use for
9825 * this access. The base and limits are checked.
9826 * @param GCPtrMem The address of the guest memory.
9827 * @param u8Value The value to store.
9828 */
9829IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9830{
9831 /* The lazy approach for now... */
9832 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9833 *pu8Dst = u8Value;
9834 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9835}
9836#endif
9837
9838
9839/**
9840 * Stores a data word.
9841 *
9842 * @returns Strict VBox status code.
9843 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9844 * @param iSegReg The index of the segment register to use for
9845 * this access. The base and limits are checked.
9846 * @param GCPtrMem The address of the guest memory.
9847 * @param u16Value The value to store.
9848 */
9849IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9850{
9851 /* The lazy approach for now... */
9852 uint16_t *pu16Dst;
9853 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9854 if (rc == VINF_SUCCESS)
9855 {
9856 *pu16Dst = u16Value;
9857 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9858 }
9859 return rc;
9860}
9861
9862
9863#ifdef IEM_WITH_SETJMP
9864/**
9865 * Stores a data word, longjmp on error.
9866 *
9867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9868 * @param iSegReg The index of the segment register to use for
9869 * this access. The base and limits are checked.
9870 * @param GCPtrMem The address of the guest memory.
9871 * @param u16Value The value to store.
9872 */
9873IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9874{
9875 /* The lazy approach for now... */
9876 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9877 *pu16Dst = u16Value;
9878 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9879}
9880#endif
9881
9882
9883/**
9884 * Stores a data dword.
9885 *
9886 * @returns Strict VBox status code.
9887 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9888 * @param iSegReg The index of the segment register to use for
9889 * this access. The base and limits are checked.
9890 * @param GCPtrMem The address of the guest memory.
9891 * @param u32Value The value to store.
9892 */
9893IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9894{
9895 /* The lazy approach for now... */
9896 uint32_t *pu32Dst;
9897 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9898 if (rc == VINF_SUCCESS)
9899 {
9900 *pu32Dst = u32Value;
9901 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9902 }
9903 return rc;
9904}
9905
9906
9907#ifdef IEM_WITH_SETJMP
9908/**
9909 * Stores a data dword, longjmp on error.
9910 *
9912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9913 * @param iSegReg The index of the segment register to use for
9914 * this access. The base and limits are checked.
9915 * @param GCPtrMem The address of the guest memory.
9916 * @param u32Value The value to store.
9917 */
9918IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9919{
9920 /* The lazy approach for now... */
9921 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9922 *pu32Dst = u32Value;
9923 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9924}
9925#endif
9926
9927
9928/**
9929 * Stores a data qword.
9930 *
9931 * @returns Strict VBox status code.
9932 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9933 * @param iSegReg The index of the segment register to use for
9934 * this access. The base and limits are checked.
9935 * @param GCPtrMem The address of the guest memory.
9936 * @param u64Value The value to store.
9937 */
9938IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9939{
9940 /* The lazy approach for now... */
9941 uint64_t *pu64Dst;
9942 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9943 if (rc == VINF_SUCCESS)
9944 {
9945 *pu64Dst = u64Value;
9946 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9947 }
9948 return rc;
9949}
9950
9951
9952#ifdef IEM_WITH_SETJMP
9953/**
9954 * Stores a data qword, longjmp on error.
9955 *
9956 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9957 * @param iSegReg The index of the segment register to use for
9958 * this access. The base and limits are checked.
9959 * @param GCPtrMem The address of the guest memory.
9960 * @param u64Value The value to store.
9961 */
9962IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9963{
9964 /* The lazy approach for now... */
9965 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9966 *pu64Dst = u64Value;
9967 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9968}
9969#endif
9970
9971
9972/**
9973 * Stores a data dqword.
9974 *
9975 * @returns Strict VBox status code.
9976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9977 * @param iSegReg The index of the segment register to use for
9978 * this access. The base and limits are checked.
9979 * @param GCPtrMem The address of the guest memory.
9980 * @param u128Value The value to store.
9981 */
9982IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9983{
9984 /* The lazy approach for now... */
9985 PRTUINT128U pu128Dst;
9986 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9987 if (rc == VINF_SUCCESS)
9988 {
9989 pu128Dst->au64[0] = u128Value.au64[0];
9990 pu128Dst->au64[1] = u128Value.au64[1];
9991 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9992 }
9993 return rc;
9994}
9995
9996
9997#ifdef IEM_WITH_SETJMP
9998/**
9999 * Stores a data dqword, longjmp on error.
10000 *
10001 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10002 * @param iSegReg The index of the segment register to use for
10003 * this access. The base and limits are checked.
10004 * @param GCPtrMem The address of the guest memory.
10005 * @param u128Value The value to store.
10006 */
10007IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10008{
10009 /* The lazy approach for now... */
10010 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10011 pu128Dst->au64[0] = u128Value.au64[0];
10012 pu128Dst->au64[1] = u128Value.au64[1];
10013 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10014}
10015#endif
10016
10017
10018/**
10019 * Stores a data dqword, SSE aligned.
10020 *
10021 * @returns Strict VBox status code.
10022 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10023 * @param iSegReg The index of the segment register to use for
10024 * this access. The base and limits are checked.
10025 * @param GCPtrMem The address of the guest memory.
10026 * @param u128Value The value to store.
10027 */
10028IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10029{
10030 /* The lazy approach for now... */
10031 if ( (GCPtrMem & 15)
10032 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10033 return iemRaiseGeneralProtectionFault0(pVCpu);
10034
10035 PRTUINT128U pu128Dst;
10036 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10037 if (rc == VINF_SUCCESS)
10038 {
10039 pu128Dst->au64[0] = u128Value.au64[0];
10040 pu128Dst->au64[1] = u128Value.au64[1];
10041 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10042 }
10043 return rc;
10044}
10045
10046
10047#ifdef IEM_WITH_SETJMP
10048/**
10049 * Stores a data dqword, SSE aligned, longjmp on error.
10050 *
10052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10053 * @param iSegReg The index of the segment register to use for
10054 * this access. The base and limits are checked.
10055 * @param GCPtrMem The address of the guest memory.
10056 * @param u128Value The value to store.
10057 */
10058DECL_NO_INLINE(IEM_STATIC, void)
10059iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10060{
10061 /* The lazy approach for now... */
10062 if ( (GCPtrMem & 15) == 0
10063 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10064 {
10065 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10066 pu128Dst->au64[0] = u128Value.au64[0];
10067 pu128Dst->au64[1] = u128Value.au64[1];
10068 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10069 return;
10070 }
10071
10072 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10073 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10074}
10075#endif
10076
10077
10078/**
10079 * Stores a data oword (octo word), generally AVX related.
10080 *
10081 * @returns Strict VBox status code.
10082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10083 * @param iSegReg The index of the segment register to use for
10084 * this access. The base and limits are checked.
10085 * @param GCPtrMem The address of the guest memory.
10086 * @param pu256Value Pointer to the value to store.
10087 */
10088IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10089{
10090 /* The lazy approach for now... */
10091 PRTUINT256U pu256Dst;
10092 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10093 if (rc == VINF_SUCCESS)
10094 {
10095 pu256Dst->au64[0] = pu256Value->au64[0];
10096 pu256Dst->au64[1] = pu256Value->au64[1];
10097 pu256Dst->au64[2] = pu256Value->au64[2];
10098 pu256Dst->au64[3] = pu256Value->au64[3];
10099 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10100 }
10101 return rc;
10102}
10103
10104
10105#ifdef IEM_WITH_SETJMP
10106/**
10107 * Stores a data oword (octo word), longjmp on error.
10108 *
10109 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10110 * @param iSegReg The index of the segment register to use for
10111 * this access. The base and limits are checked.
10112 * @param GCPtrMem The address of the guest memory.
10113 * @param pu256Value Pointer to the value to store.
10114 */
10115IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10116{
10117 /* The lazy approach for now... */
10118 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10119 pu256Dst->au64[0] = pu256Value->au64[0];
10120 pu256Dst->au64[1] = pu256Value->au64[1];
10121 pu256Dst->au64[2] = pu256Value->au64[2];
10122 pu256Dst->au64[3] = pu256Value->au64[3];
10123 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10124}
10125#endif
10126
10127
10128/**
10129 * Stores a data oword (octo word), AVX aligned.
10130 *
10131 * @returns Strict VBox status code.
10132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10133 * @param iSegReg The index of the segment register to use for
10134 * this access. The base and limits are checked.
10135 * @param GCPtrMem The address of the guest memory.
10136 * @param pu256Value Pointer to the value to store.
10137 */
10138IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10139{
10140 /* The lazy approach for now... */
10141 if (GCPtrMem & 31)
10142 return iemRaiseGeneralProtectionFault0(pVCpu);
10143
10144 PRTUINT256U pu256Dst;
10145 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10146 if (rc == VINF_SUCCESS)
10147 {
10148 pu256Dst->au64[0] = pu256Value->au64[0];
10149 pu256Dst->au64[1] = pu256Value->au64[1];
10150 pu256Dst->au64[2] = pu256Value->au64[2];
10151 pu256Dst->au64[3] = pu256Value->au64[3];
10152 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10153 }
10154 return rc;
10155}
10156
10157
10158#ifdef IEM_WITH_SETJMP
10159/**
10160 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10161 *
10163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10164 * @param iSegReg The index of the segment register to use for
10165 * this access. The base and limits are checked.
10166 * @param GCPtrMem The address of the guest memory.
10167 * @param pu256Value Pointer to the value to store.
10168 */
10169DECL_NO_INLINE(IEM_STATIC, void)
10170iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10171{
10172 /* The lazy approach for now... */
10173 if ((GCPtrMem & 31) == 0)
10174 {
10175 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10176 pu256Dst->au64[0] = pu256Value->au64[0];
10177 pu256Dst->au64[1] = pu256Value->au64[1];
10178 pu256Dst->au64[2] = pu256Value->au64[2];
10179 pu256Dst->au64[3] = pu256Value->au64[3];
10180 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10181 return;
10182 }
10183
10184 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10185 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10186}
10187#endif
10188
10189
10190/**
10191 * Stores a descriptor register (sgdt, sidt).
10192 *
10193 * @returns Strict VBox status code.
10194 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10195 * @param cbLimit The limit.
10196 * @param GCPtrBase The base address.
10197 * @param iSegReg The index of the segment register to use for
10198 * this access. The base and limits are checked.
10199 * @param GCPtrMem The address of the guest memory.
10200 */
10201IEM_STATIC VBOXSTRICTRC
10202iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10203{
10204 /*
10205 * The SIDT and SGDT instructions actually store the data using two
10206 * independent writes. These instructions do not respond to operand-size prefixes.
10207 */
10208 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10209 if (rcStrict == VINF_SUCCESS)
10210 {
10211 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10212 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10213 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10214 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10215 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10216 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10217 else
10218 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10219 }
10220 return rcStrict;
10221}
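
/*
 * Concrete illustration of the 16-bit case above (the base value is made up):
 * on a 286-or-older target CPU a 16-bit SGDT with a base of 0x00123456 stores
 * the limit word followed by the dword 0xff123456, i.e. the unimplemented high
 * byte reads as all ones, while later CPUs store the 32-bit base unmodified.
 */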
10222
10223
10224/**
10225 * Pushes a word onto the stack.
10226 *
10227 * @returns Strict VBox status code.
10228 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10229 * @param u16Value The value to push.
10230 */
10231IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value)
10232{
10233 /* Decrement the stack pointer. */
10234 uint64_t uNewRsp;
10235 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10236
10237 /* Write the word the lazy way. */
10238 uint16_t *pu16Dst;
10239 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10240 if (rc == VINF_SUCCESS)
10241 {
10242 *pu16Dst = u16Value;
10243 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10244 }
10245
10246 /* Commit the new RSP value unless an access handler made trouble. */
10247 if (rc == VINF_SUCCESS)
10248 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10249
10250 return rc;
10251}
10252
10253
10254/**
10255 * Pushes a dword onto the stack.
10256 *
10257 * @returns Strict VBox status code.
10258 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10259 * @param u32Value The value to push.
10260 */
10261IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value)
10262{
10263 /* Decrement the stack pointer. */
10264 uint64_t uNewRsp;
10265 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10266
10267 /* Write the dword the lazy way. */
10268 uint32_t *pu32Dst;
10269 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10270 if (rc == VINF_SUCCESS)
10271 {
10272 *pu32Dst = u32Value;
10273 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10274 }
10275
10276 /* Commit the new RSP value unless an access handler made trouble. */
10277 if (rc == VINF_SUCCESS)
10278 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10279
10280 return rc;
10281}
10282
10283
10284/**
10285 * Pushes a dword segment register value onto the stack.
10286 *
10287 * @returns Strict VBox status code.
10288 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10289 * @param u32Value The value to push.
10290 */
10291IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value)
10292{
10293 /* Decrement the stack pointer. */
10294 uint64_t uNewRsp;
10295 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10296
10297 /* The Intel docs talk about zero extending the selector register
10298 value. My actual Intel CPU here might be zero extending the value,
10299 but it still only writes the lower word... */
10300 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10301 * happens when crossing an electric page boundary: is the high word checked
10302 * for write accessibility or not? Probably it is. What about segment limits?
10303 * It appears this behavior is also shared with trap error codes.
10304 *
10305 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro. Check
10306 * ancient hardware to find out when it actually changed. */
10307 uint16_t *pu16Dst;
10308 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10309 if (rc == VINF_SUCCESS)
10310 {
10311 *pu16Dst = (uint16_t)u32Value;
10312 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10313 }
10314
10315 /* Commit the new RSP value unless an access handler made trouble. */
10316 if (rc == VINF_SUCCESS)
10317 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10318
10319 return rc;
10320}
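
/*
 * Illustration of the behavior described above (the values are made up): if
 * the dword at the new stack top currently reads 0xdeadbeef and a selector of
 * 0x0023 is pushed with a 32-bit operand size, the slot reads back as
 * 0xdead0023 afterwards, i.e. only the low word is written, yet ESP is still
 * lowered by 4.
 */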
10321
10322
10323/**
10324 * Pushes a qword onto the stack.
10325 *
10326 * @returns Strict VBox status code.
10327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10328 * @param u64Value The value to push.
10329 */
10330IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value)
10331{
10332 /* Decrement the stack pointer. */
10333 uint64_t uNewRsp;
10334 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10335
10336 /* Write the qword the lazy way. */
10337 uint64_t *pu64Dst;
10338 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10339 if (rc == VINF_SUCCESS)
10340 {
10341 *pu64Dst = u64Value;
10342 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10343 }
10344
10345 /* Commit the new RSP value unless an access handler made trouble. */
10346 if (rc == VINF_SUCCESS)
10347 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10348
10349 return rc;
10350}
10351
10352
10353/**
10354 * Pops a word from the stack.
10355 *
10356 * @returns Strict VBox status code.
10357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10358 * @param pu16Value Where to store the popped value.
10359 */
10360IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value)
10361{
10362 /* Increment the stack pointer. */
10363 uint64_t uNewRsp;
10364 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10365
10366 /* Fetch the word the lazy way. */
10367 uint16_t const *pu16Src;
10368 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10369 if (rc == VINF_SUCCESS)
10370 {
10371 *pu16Value = *pu16Src;
10372 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10373
10374 /* Commit the new RSP value. */
10375 if (rc == VINF_SUCCESS)
10376 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10377 }
10378
10379 return rc;
10380}
10381
10382
10383/**
10384 * Pops a dword from the stack.
10385 *
10386 * @returns Strict VBox status code.
10387 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10388 * @param pu32Value Where to store the popped value.
10389 */
10390IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value)
10391{
10392 /* Increment the stack pointer. */
10393 uint64_t uNewRsp;
10394 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10395
10396 /* Fetch the dword the lazy way. */
10397 uint32_t const *pu32Src;
10398 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10399 if (rc == VINF_SUCCESS)
10400 {
10401 *pu32Value = *pu32Src;
10402 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10403
10404 /* Commit the new RSP value. */
10405 if (rc == VINF_SUCCESS)
10406 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10407 }
10408
10409 return rc;
10410}
10411
10412
10413/**
10414 * Pops a qword from the stack.
10415 *
10416 * @returns Strict VBox status code.
10417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10418 * @param pu64Value Where to store the popped value.
10419 */
10420IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value)
10421{
10422 /* Increment the stack pointer. */
10423 uint64_t uNewRsp;
10424 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10425
10426 /* Fetch the qword the lazy way. */
10427 uint64_t const *pu64Src;
10428 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10429 if (rc == VINF_SUCCESS)
10430 {
10431 *pu64Value = *pu64Src;
10432 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10433
10434 /* Commit the new RSP value. */
10435 if (rc == VINF_SUCCESS)
10436 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10437 }
10438
10439 return rc;
10440}
10441
10442
10443/**
10444 * Pushes a word onto the stack, using a temporary stack pointer.
10445 *
10446 * @returns Strict VBox status code.
10447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10448 * @param u16Value The value to push.
10449 * @param pTmpRsp Pointer to the temporary stack pointer.
10450 */
10451IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10452{
10453 /* Decrement the stack pointer. */
10454 RTUINT64U NewRsp = *pTmpRsp;
10455 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10456
10457 /* Write the word the lazy way. */
10458 uint16_t *pu16Dst;
10459 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10460 if (rc == VINF_SUCCESS)
10461 {
10462 *pu16Dst = u16Value;
10463 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10464 }
10465
10466 /* Commit the new RSP value unless an access handler made trouble. */
10467 if (rc == VINF_SUCCESS)
10468 *pTmpRsp = NewRsp;
10469
10470 return rc;
10471}
10472
10473
10474/**
10475 * Pushes a dword onto the stack, using a temporary stack pointer.
10476 *
10477 * @returns Strict VBox status code.
10478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10479 * @param u32Value The value to push.
10480 * @param pTmpRsp Pointer to the temporary stack pointer.
10481 */
10482IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10483{
10484 /* Decrement the stack pointer. */
10485 RTUINT64U NewRsp = *pTmpRsp;
10486 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10487
10488 /* Write the dword the lazy way. */
10489 uint32_t *pu32Dst;
10490 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10491 if (rc == VINF_SUCCESS)
10492 {
10493 *pu32Dst = u32Value;
10494 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10495 }
10496
10497 /* Commit the new RSP value unless an access handler made trouble. */
10498 if (rc == VINF_SUCCESS)
10499 *pTmpRsp = NewRsp;
10500
10501 return rc;
10502}
10503
10504
10505/**
10506 * Pushes a qword onto the stack, using a temporary stack pointer.
10507 *
10508 * @returns Strict VBox status code.
10509 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10510 * @param u64Value The value to push.
10511 * @param pTmpRsp Pointer to the temporary stack pointer.
10512 */
10513IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10514{
10515 /* Decrement the stack pointer. */
10516 RTUINT64U NewRsp = *pTmpRsp;
10517 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10518
10519 /* Write the qword the lazy way. */
10520 uint64_t *pu64Dst;
10521 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10522 if (rc == VINF_SUCCESS)
10523 {
10524 *pu64Dst = u64Value;
10525 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10526 }
10527
10528 /* Commit the new RSP value unless an access handler made trouble. */
10529 if (rc == VINF_SUCCESS)
10530 *pTmpRsp = NewRsp;
10531
10532 return rc;
10533}
10534
10535
10536/**
10537 * Pops a word from the stack, using a temporary stack pointer.
10538 *
10539 * @returns Strict VBox status code.
10540 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10541 * @param pu16Value Where to store the popped value.
10542 * @param pTmpRsp Pointer to the temporary stack pointer.
10543 */
10544IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10545{
10546 /* Increment the stack pointer. */
10547 RTUINT64U NewRsp = *pTmpRsp;
10548 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10549
10550 /* Fetch the word the lazy way. */
10551 uint16_t const *pu16Src;
10552 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10553 if (rc == VINF_SUCCESS)
10554 {
10555 *pu16Value = *pu16Src;
10556 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10557
10558 /* Commit the new RSP value. */
10559 if (rc == VINF_SUCCESS)
10560 *pTmpRsp = NewRsp;
10561 }
10562
10563 return rc;
10564}
10565
10566
10567/**
10568 * Pops a dword from the stack, using a temporary stack pointer.
10569 *
10570 * @returns Strict VBox status code.
10571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10572 * @param pu32Value Where to store the popped value.
10573 * @param pTmpRsp Pointer to the temporary stack pointer.
10574 */
10575IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10576{
10577 /* Increment the stack pointer. */
10578 RTUINT64U NewRsp = *pTmpRsp;
10579 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10580
10581 /* Fetch the dword the lazy way. */
10582 uint32_t const *pu32Src;
10583 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10584 if (rc == VINF_SUCCESS)
10585 {
10586 *pu32Value = *pu32Src;
10587 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10588
10589 /* Commit the new RSP value. */
10590 if (rc == VINF_SUCCESS)
10591 *pTmpRsp = NewRsp;
10592 }
10593
10594 return rc;
10595}
10596
10597
10598/**
10599 * Pops a qword from the stack, using a temporary stack pointer.
10600 *
10601 * @returns Strict VBox status code.
10602 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10603 * @param pu64Value Where to store the popped value.
10604 * @param pTmpRsp Pointer to the temporary stack pointer.
10605 */
10606IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10607{
10608 /* Increment the stack pointer. */
10609 RTUINT64U NewRsp = *pTmpRsp;
10610 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10611
10612 /* Fetch the qword the lazy way. */
10613 uint64_t const *pu64Src;
10614 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10615 if (rcStrict == VINF_SUCCESS)
10616 {
10617 *pu64Value = *pu64Src;
10618 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10619
10620 /* Commit the new RSP value. */
10621 if (rcStrict == VINF_SUCCESS)
10622 *pTmpRsp = NewRsp;
10623 }
10624
10625 return rcStrict;
10626}
10627
10628
10629/**
10630 * Begin a special stack push (used by interrupts, exceptions and such).
10631 *
10632 * This will raise \#SS or \#PF if appropriate.
10633 *
10634 * @returns Strict VBox status code.
10635 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10636 * @param cbMem The number of bytes to push onto the stack.
10637 * @param ppvMem Where to return the pointer to the stack memory.
10638 * As with the other memory functions this could be
10639 * direct access or bounce buffered access, so
10640 * don't commit register until the commit call
10641 * succeeds.
10642 * @param puNewRsp Where to return the new RSP value. This must be
10643 * passed unchanged to
10644 * iemMemStackPushCommitSpecial().
10645 */
10646IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10647{
10648 Assert(cbMem < UINT8_MAX);
10649 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10650 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10651}
10652
10653
10654/**
10655 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10656 *
10657 * This will update the rSP.
10658 *
10659 * @returns Strict VBox status code.
10660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10661 * @param pvMem The pointer returned by
10662 * iemMemStackPushBeginSpecial().
10663 * @param uNewRsp The new RSP value returned by
10664 * iemMemStackPushBeginSpecial().
10665 */
10666IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp)
10667{
10668 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10669 if (rcStrict == VINF_SUCCESS)
10670 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10671 return rcStrict;
10672}
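
/*
 * Purely illustrative sketch of the begin/commit pairing for the special stack
 * push API, roughly the shape of an exception frame writer.  The function and
 * the three-dword frame layout are assumptions made for this sketch only; the
 * point is that RSP is left untouched until iemMemStackPushCommitSpecial
 * succeeds.
 *
 *  IEM_STATIC VBOXSTRICTRC iemExamplePushXcptFrame32(PVMCPUCC pVCpu, uint32_t fEfl, uint16_t uCs, uint32_t uEip)
 *  {
 *      uint32_t    *pau32Frame;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 3 * sizeof(uint32_t), (void **)&pau32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pau32Frame[0] = uEip;               // the frame layout is an assumption made for this sketch
 *      pau32Frame[1] = uCs;
 *      pau32Frame[2] = fEfl;
 *      return iemMemStackPushCommitSpecial(pVCpu, pau32Frame, uNewRsp);   // commits RSP only on success
 *  }
 */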
10673
10674
10675/**
10676 * Begin a special stack pop (used by iret, retf and such).
10677 *
10678 * This will raise \#SS or \#PF if appropriate.
10679 *
10680 * @returns Strict VBox status code.
10681 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10682 * @param cbMem The number of bytes to pop from the stack.
10683 * @param ppvMem Where to return the pointer to the stack memory.
10684 * @param puNewRsp Where to return the new RSP value. This must be
10685 * assigned to CPUMCTX::rsp manually some time
10686 * after iemMemStackPopDoneSpecial() has been
10687 * called.
10688 */
10689IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10690{
10691 Assert(cbMem < UINT8_MAX);
10692 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10693 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10694}
10695
10696
10697/**
10698 * Continue a special stack pop (used by iret and retf).
10699 *
10700 * This will raise \#SS or \#PF if appropriate.
10701 *
10702 * @returns Strict VBox status code.
10703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10704 * @param cbMem The number of bytes to pop from the stack.
10705 * @param ppvMem Where to return the pointer to the stack memory.
10706 * @param puNewRsp Where to return the new RSP value. This must be
10707 * assigned to CPUMCTX::rsp manually some time
10708 * after iemMemStackPopDoneSpecial() has been
10709 * called.
10710 */
10711IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10712{
10713 Assert(cbMem < UINT8_MAX);
10714 RTUINT64U NewRsp;
10715 NewRsp.u = *puNewRsp;
10716 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10717 *puNewRsp = NewRsp.u;
10718 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10719}
10720
10721
10722/**
10723 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10724 * iemMemStackPopContinueSpecial).
10725 *
10726 * The caller will manually commit the rSP.
10727 *
10728 * @returns Strict VBox status code.
10729 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10730 * @param pvMem The pointer returned by
10731 * iemMemStackPopBeginSpecial() or
10732 * iemMemStackPopContinueSpecial().
10733 */
10734IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem)
10735{
10736 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10737}
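/* Illustrative usage sketch (not part of the original sources): a typical
 * begin/done sequence for the special stack pop helpers above.  Unlike the
 * push commit, iemMemStackPopDoneSpecial does not touch RSP; the caller
 * assigns the returned RSP value to CPUMCTX::rsp itself once the whole
 * operation is known not to fault.  Names are made up for illustration. */
#if 0 /* illustration only */
{
    uint64_t const *pu64Src;
    uint64_t        uNewRsp;
    VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, (void const **)&pu64Src, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        uint64_t const uPoppedValue = *pu64Src;             /* read the mapped stack memory */
        rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu64Src);
        if (rcStrict == VINF_SUCCESS)
            pVCpu->cpum.GstCtx.rsp = uNewRsp;               /* manual RSP commit, as documented above */
        NOREF(uPoppedValue);
    }
    return rcStrict;
}
#endif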
10738
10739
10740/**
10741 * Fetches a system table byte.
10742 *
10743 * @returns Strict VBox status code.
10744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10745 * @param pbDst Where to return the byte.
10746 * @param iSegReg The index of the segment register to use for
10747 * this access. The base and limits are checked.
10748 * @param GCPtrMem The address of the guest memory.
10749 */
10750IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10751{
10752 /* The lazy approach for now... */
10753 uint8_t const *pbSrc;
10754 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10755 if (rc == VINF_SUCCESS)
10756 {
10757 *pbDst = *pbSrc;
10758 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10759 }
10760 return rc;
10761}
10762
10763
10764/**
10765 * Fetches a system table word.
10766 *
10767 * @returns Strict VBox status code.
10768 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10769 * @param pu16Dst Where to return the word.
10770 * @param iSegReg The index of the segment register to use for
10771 * this access. The base and limits are checked.
10772 * @param GCPtrMem The address of the guest memory.
10773 */
10774IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10775{
10776 /* The lazy approach for now... */
10777 uint16_t const *pu16Src;
10778 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10779 if (rc == VINF_SUCCESS)
10780 {
10781 *pu16Dst = *pu16Src;
10782 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10783 }
10784 return rc;
10785}
10786
10787
10788/**
10789 * Fetches a system table dword.
10790 *
10791 * @returns Strict VBox status code.
10792 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10793 * @param pu32Dst Where to return the dword.
10794 * @param iSegReg The index of the segment register to use for
10795 * this access. The base and limits are checked.
10796 * @param GCPtrMem The address of the guest memory.
10797 */
10798IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10799{
10800 /* The lazy approach for now... */
10801 uint32_t const *pu32Src;
10802 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10803 if (rc == VINF_SUCCESS)
10804 {
10805 *pu32Dst = *pu32Src;
10806 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10807 }
10808 return rc;
10809}
10810
10811
10812/**
10813 * Fetches a system table qword.
10814 *
10815 * @returns Strict VBox status code.
10816 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10817 * @param pu64Dst Where to return the qword.
10818 * @param iSegReg The index of the segment register to use for
10819 * this access. The base and limits are checked.
10820 * @param GCPtrMem The address of the guest memory.
10821 */
10822IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10823{
10824 /* The lazy approach for now... */
10825 uint64_t const *pu64Src;
10826 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10827 if (rc == VINF_SUCCESS)
10828 {
10829 *pu64Dst = *pu64Src;
10830 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10831 }
10832 return rc;
10833}
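/* Illustrative usage sketch (not part of the original sources): reading one
 * legacy descriptor (8 bytes) straight out of the GDT with the helper above,
 * much like iemMemFetchSelDescWithErr below does for post-286 targets.
 * UINT8_MAX as the segment register index means a flat/system access; uSel is
 * a made-up selector and the bounds check against gdtr.cbGdt is omitted. */
#if 0 /* illustration only */
{
    uint64_t     u64Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &u64Desc, UINT8_MAX /* no seg */,
                                              pVCpu->cpum.GstCtx.gdtr.pGdt + (uSel & X86_SEL_MASK));
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* u64Desc now holds the raw descriptor bits. */
}
#endif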
10834
10835
10836/**
10837 * Fetches a descriptor table entry with caller specified error code.
10838 *
10839 * @returns Strict VBox status code.
10840 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10841 * @param pDesc Where to return the descriptor table entry.
10842 * @param uSel The selector which table entry to fetch.
10843 * @param uXcpt The exception to raise on table lookup error.
10844 * @param uErrorCode The error code associated with the exception.
10845 */
10846IEM_STATIC VBOXSTRICTRC
10847iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10848{
10849 AssertPtr(pDesc);
10850 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10851
10852 /** @todo did the 286 require all 8 bytes to be accessible? */
10853 /*
10854 * Get the selector table base and check bounds.
10855 */
10856 RTGCPTR GCPtrBase;
10857 if (uSel & X86_SEL_LDT)
10858 {
10859 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10860 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10861 {
10862 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10863 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10864 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10865 uErrorCode, 0);
10866 }
10867
10868 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10869 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10870 }
10871 else
10872 {
10873 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10874 {
10875 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10876 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10877 uErrorCode, 0);
10878 }
10879 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10880 }
10881
10882 /*
10883 * Read the legacy descriptor and maybe the long mode extensions if
10884 * required.
10885 */
10886 VBOXSTRICTRC rcStrict;
10887 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10888 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10889 else
10890 {
10891 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10892 if (rcStrict == VINF_SUCCESS)
10893 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10894 if (rcStrict == VINF_SUCCESS)
10895 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10896 if (rcStrict == VINF_SUCCESS)
10897 pDesc->Legacy.au16[3] = 0;
10898 else
10899 return rcStrict;
10900 }
10901
10902 if (rcStrict == VINF_SUCCESS)
10903 {
10904 if ( !IEM_IS_LONG_MODE(pVCpu)
10905 || pDesc->Legacy.Gen.u1DescType)
10906 pDesc->Long.au64[1] = 0;
10907 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
10908 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10909 else
10910 {
10911 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10912 /** @todo is this the right exception? */
10913 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10914 }
10915 }
10916 return rcStrict;
10917}
10918
10919
10920/**
10921 * Fetches a descriptor table entry.
10922 *
10923 * @returns Strict VBox status code.
10924 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10925 * @param pDesc Where to return the descriptor table entry.
10926 * @param uSel The selector which table entry to fetch.
10927 * @param uXcpt The exception to raise on table lookup error.
10928 */
10929IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10930{
10931 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10932}
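/* Illustrative usage sketch (not part of the original sources): fetching the
 * descriptor for a selector and letting lookup failures surface as #GP(sel),
 * which is what most callers of iemMemFetchSelDesc do.  uNewCs is a made-up
 * selector variable. */
#if 0 /* illustration only */
{
    IEMSELDESC   DescCs;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &DescCs, uNewCs, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* DescCs.Legacy (and DescCs.Long in long mode) can now be examined. */
}
#endif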
10933
10934
10935/**
10936 * Fakes a long mode stack selector for SS = 0.
10937 *
10938 * @param pDescSs Where to return the fake stack descriptor.
10939 * @param uDpl The DPL we want.
10940 */
10941IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10942{
10943 pDescSs->Long.au64[0] = 0;
10944 pDescSs->Long.au64[1] = 0;
10945 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10946 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10947 pDescSs->Long.Gen.u2Dpl = uDpl;
10948 pDescSs->Long.Gen.u1Present = 1;
10949 pDescSs->Long.Gen.u1Long = 1;
10950}
10951
10952
10953/**
10954 * Marks the selector descriptor as accessed (only non-system descriptors).
10955 *
10956 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10957 * will therefore skip the limit checks.
10958 *
10959 * @returns Strict VBox status code.
10960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10961 * @param uSel The selector.
10962 */
10963IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel)
10964{
10965 /*
10966 * Get the selector table base and calculate the entry address.
10967 */
10968 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10969 ? pVCpu->cpum.GstCtx.ldtr.u64Base
10970 : pVCpu->cpum.GstCtx.gdtr.pGdt;
10971 GCPtr += uSel & X86_SEL_MASK;
10972
10973 /*
10974 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10975 * ugly stuff to avoid this. This also makes sure the access is atomic
10976 * and more or less removes any question about 8-bit vs 32-bit accesses.
10977 */
10978 VBOXSTRICTRC rcStrict;
10979 uint32_t volatile *pu32;
10980 if ((GCPtr & 3) == 0)
10981 {
10982        /* The normal case: map the 32 bits surrounding the accessed bit (bit 40). */
10983 GCPtr += 2 + 2;
10984 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10985 if (rcStrict != VINF_SUCCESS)
10986 return rcStrict;
10987        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10988 }
10989 else
10990 {
10991 /* The misaligned GDT/LDT case, map the whole thing. */
10992 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10993 if (rcStrict != VINF_SUCCESS)
10994 return rcStrict;
10995 switch ((uintptr_t)pu32 & 3)
10996 {
10997 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10998 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10999 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11000 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11001 }
11002 }
11003
11004 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11005}
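/* Worked example of the offsets above (clarification, not in the original
 * sources): the accessed flag is bit 0 of the type field in byte 5 of the
 * descriptor, i.e. bit 40 of the 8-byte entry.  For a misaligned mapping the
 * byte pointer is advanced to the next 32-bit boundary and the bit index is
 * reduced by 8 for each byte skipped, so e.g. (pu32 & 3) == 1 gives
 * (uint8_t *)pu32 + 3 and bit 40 - 24 = 16, which is still byte 5, bit 0. */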
11006
11007/** @} */
11008
11009
11010/*
11011 * Include the C/C++ implementation of instruction.
11012 */
11013#include "IEMAllCImpl.cpp.h"
11014
11015
11016
11017/** @name "Microcode" macros.
11018 *
11019 * The idea is that we should be able to use the same code to interpret
11020 * instructions as well as to recompile them.  Thus this obfuscation.
11021 *
11022 * @{
11023 */
11024#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11025#define IEM_MC_END() }
11026#define IEM_MC_PAUSE() do {} while (0)
11027#define IEM_MC_CONTINUE() do {} while (0)
11028
11029/** Internal macro. */
11030#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11031 do \
11032 { \
11033 VBOXSTRICTRC rcStrict2 = a_Expr; \
11034 if (rcStrict2 != VINF_SUCCESS) \
11035 return rcStrict2; \
11036 } while (0)
11037
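/* Illustrative sketch (not from the original sources): roughly how an
 * instruction body composes these microcode macros.  In the interpreter the
 * macros expand to the plain C defined below; a recompiler could redefine
 * them to emit code instead.  The register indices and the reg,reg move are
 * made up for illustration. */
#if 0 /* illustration only */
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint16_t, u16Value);
    IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xCX);
    IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Value);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
#endif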
11038
11039#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11040#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11041#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11042#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11043#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11044#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11045#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11046#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11047#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11048 do { \
11049 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11050 return iemRaiseDeviceNotAvailable(pVCpu); \
11051 } while (0)
11052#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11053 do { \
11054 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11055 return iemRaiseDeviceNotAvailable(pVCpu); \
11056 } while (0)
11057#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11058 do { \
11059 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11060 return iemRaiseMathFault(pVCpu); \
11061 } while (0)
11062#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11063 do { \
11064 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11065 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11066 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11067 return iemRaiseUndefinedOpcode(pVCpu); \
11068 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11069 return iemRaiseDeviceNotAvailable(pVCpu); \
11070 } while (0)
11071#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11072 do { \
11073 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11074 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11075 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11076 return iemRaiseUndefinedOpcode(pVCpu); \
11077 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11078 return iemRaiseDeviceNotAvailable(pVCpu); \
11079 } while (0)
11080#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11081 do { \
11082 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11083 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11084 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11085 return iemRaiseUndefinedOpcode(pVCpu); \
11086 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11087 return iemRaiseDeviceNotAvailable(pVCpu); \
11088 } while (0)
11089#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11090 do { \
11091 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11092 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11093 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11094 return iemRaiseUndefinedOpcode(pVCpu); \
11095 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11096 return iemRaiseDeviceNotAvailable(pVCpu); \
11097 } while (0)
11098#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11099 do { \
11100 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11101 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11102 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11103 return iemRaiseUndefinedOpcode(pVCpu); \
11104 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11105 return iemRaiseDeviceNotAvailable(pVCpu); \
11106 } while (0)
11107#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11108 do { \
11109 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11110 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11111 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11112 return iemRaiseUndefinedOpcode(pVCpu); \
11113 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11114 return iemRaiseDeviceNotAvailable(pVCpu); \
11115 } while (0)
11116#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11117 do { \
11118 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11119 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11120 return iemRaiseUndefinedOpcode(pVCpu); \
11121 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11122 return iemRaiseDeviceNotAvailable(pVCpu); \
11123 } while (0)
11124#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11125 do { \
11126 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11127 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11128 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11129 return iemRaiseUndefinedOpcode(pVCpu); \
11130 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11131 return iemRaiseDeviceNotAvailable(pVCpu); \
11132 } while (0)
11133#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11134 do { \
11135 if (pVCpu->iem.s.uCpl != 0) \
11136 return iemRaiseGeneralProtectionFault0(pVCpu); \
11137 } while (0)
11138#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11139 do { \
11140 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11141 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11142 } while (0)
11143#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11144 do { \
11145 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11146 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11147 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11148 return iemRaiseUndefinedOpcode(pVCpu); \
11149 } while (0)
11150#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11151 do { \
11152 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11153 return iemRaiseGeneralProtectionFault0(pVCpu); \
11154 } while (0)
11155
11156
11157#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11158#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11159#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11160#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11161#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11162#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11163#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11164 uint32_t a_Name; \
11165 uint32_t *a_pName = &a_Name
11166#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11167 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11168
11169#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11170#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11171
11172#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11173#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11174#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11175#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11176#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11177#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11178#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11179#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11180#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11181#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11182#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11183#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11184#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11185#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11186#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11187#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11188#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11189#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11190 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11191 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11192 } while (0)
11193#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11194 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11195 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11196 } while (0)
11197#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11198 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11199 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11200 } while (0)
11201/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11202#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11203 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11204 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11205 } while (0)
11206#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11207 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11208 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11209 } while (0)
11210/** @note Not for IOPL or IF testing or modification. */
11211#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11212#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11213#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11214#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11215
11216#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11217#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11218#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11219#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11220#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11221#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11222#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11223#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11224#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11225#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11226/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11227#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11228 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11229 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11230 } while (0)
11231#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11232 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11233 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11234 } while (0)
11235#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11236 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11237
11238
11239#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11240#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11241/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11242 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11243#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11244#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11245/** @note Not for IOPL or IF testing or modification. */
11246#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11247
11248#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11249#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11250#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11251 do { \
11252 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11253 *pu32Reg += (a_u32Value); \
11254        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11255 } while (0)
11256#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11257
11258#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11259#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11260#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11261 do { \
11262 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11263 *pu32Reg -= (a_u32Value); \
11264        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11265 } while (0)
11266#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11267#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11268
11269#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11270#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11271#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11272#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11273#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11274#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11275#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11276
11277#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11278#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11279#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11280#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11281
11282#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11283#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11284#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11285
11286#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11287#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11288#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11289
11290#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11291#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11292#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11293
11294#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11295#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11296#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11297
11298#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11299
11300#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11301
11302#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11303#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11304#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11305 do { \
11306 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11307 *pu32Reg &= (a_u32Value); \
11308        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11309 } while (0)
11310#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11311
11312#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11313#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11314#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11315 do { \
11316 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11317 *pu32Reg |= (a_u32Value); \
11318        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11319 } while (0)
11320#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11321
11322
11323/** @note Not for IOPL or IF modification. */
11324#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11325/** @note Not for IOPL or IF modification. */
11326#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11327/** @note Not for IOPL or IF modification. */
11328#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11329
11330#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11331
11332/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11333#define IEM_MC_FPU_TO_MMX_MODE() do { \
11334 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11335 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11336 } while (0)
11337
11338/** Switches the FPU state from MMX mode (FTW=0xffff). */
11339#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11340 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11341 } while (0)
11342
11343#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11344 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11345#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11346 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11347#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11348 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11349 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11350 } while (0)
11351#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11352 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11353 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11354 } while (0)
11355#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11356 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11357#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11358 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11359#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11360 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11361
11362#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11363 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11364 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11365 } while (0)
11366#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11367 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11368#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11369 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11370#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11371 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11372#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11373 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11374 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11375 } while (0)
11376#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11377 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11378#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11379 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11380 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11381 } while (0)
11382#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11383 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11384#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11385 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11386 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11387 } while (0)
11388#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11389 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11390#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11391 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11392#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11393 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11394#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11395 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11396#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11397 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11398 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11399 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11400 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11401 } while (0)
11402
11403#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11404 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11405 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11406 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11407 } while (0)
11408#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11409 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11410 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11411 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11412 } while (0)
11413#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11414 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11415 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11416 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11417 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11418 } while (0)
11419#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11420 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11421 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11422 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11423 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11424 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11425 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11426 } while (0)
11427
11428#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11429#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11430 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11431 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11432 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11433 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11434 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11435 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11436 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11437 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11438 } while (0)
11439#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11440 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11441 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11442 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11443 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11444 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11445 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11446 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11447 } while (0)
11448#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11449 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11450 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11451 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11452 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11453 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11454 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11455 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11456 } while (0)
11457#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11458 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11459 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11460 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11461 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11462 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11463 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11464 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11465 } while (0)
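/* Clarifying note (not in the original sources): the *_ZX_VLMAX operations in
 * this area model VEX-encoded writes, which zero the destination register all
 * the way up to the maximum supported vector length.  Hence the explicit
 * zeroing of the YmmHi halves and the (currently empty)
 * IEM_MC_INT_CLEAR_ZMM_256_UP hook for wider registers. */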
11466
11467#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11468 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11469#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11470 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11471#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11472 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11473#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11474 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11475 uintptr_t const iYRegTmp = (a_iYReg); \
11476 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11477 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11478 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11479 } while (0)
11480
11481#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11482 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11483 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11484 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11485 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11486 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11487 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11488 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11489 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11490 } while (0)
11491#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11492 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11493 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11494 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11495 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11496 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11497 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11498 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11499 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11500 } while (0)
11501#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11502 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11503 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11504 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11505 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11506 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11507 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11508 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11509 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11510 } while (0)
11511
11512#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11513 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11514 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11515 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11516 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11517 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11518 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11519 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11520 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11521 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11522 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11523 } while (0)
11524#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11525 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11526 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11527 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11528 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11529 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11530 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11531 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11532 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11533 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11534 } while (0)
11535#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11536 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11537 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11538 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11539 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11540 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11541 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11542 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11543 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11544 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11545 } while (0)
11546#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11547 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11548 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11549 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11550 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11551 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11552 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11553 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11554 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11555 } while (0)
11556
11557#ifndef IEM_WITH_SETJMP
11558# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11559 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11560# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11561 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11562# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11563 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11564#else
11565# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11566 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11567# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11568 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11569# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11570 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11571#endif
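/* Clarifying note (not in the original sources): this block and the ones that
 * follow provide two flavours of each memory access macro.  Without
 * IEM_WITH_SETJMP the fetch helpers return a VBOXSTRICTRC which
 * IEM_MC_RETURN_ON_FAILURE propagates; with IEM_WITH_SETJMP the *Jmp helpers
 * longjmp out of the instruction on failure, so the macros collapse into
 * plain assignments. */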
11572
11573#ifndef IEM_WITH_SETJMP
11574# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11575 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11576# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11577 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11578# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11579 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11580#else
11581# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11582 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11583# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11584 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11585# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11586 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11587#endif
11588
11589#ifndef IEM_WITH_SETJMP
11590# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11591 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11592# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11593 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11594# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11595 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11596#else
11597# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11598 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11599# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11600 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11601# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11602 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11603#endif
11604
11605#ifdef SOME_UNUSED_FUNCTION
11606# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11607 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11608#endif
11609
11610#ifndef IEM_WITH_SETJMP
11611# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11612 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11613# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11614 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11615# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11616 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11617# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11618 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11619#else
11620# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11621 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11622# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11623 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11624# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11625 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11626# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11627 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11628#endif
11629
11630#ifndef IEM_WITH_SETJMP
11631# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11632 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11633# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11634 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11635# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11636 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11637#else
11638# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11639 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11640# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11641 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11642# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11643 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11644#endif
11645
11646#ifndef IEM_WITH_SETJMP
11647# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11648 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11649# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11650 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11651#else
11652# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11653 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11654# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11655 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11656#endif
11657
11658#ifndef IEM_WITH_SETJMP
11659# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11660 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11661# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11662 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11663#else
11664# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11665 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11666# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11667 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11668#endif
11669
11670
11671
11672#ifndef IEM_WITH_SETJMP
11673# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11674 do { \
11675 uint8_t u8Tmp; \
11676 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11677 (a_u16Dst) = u8Tmp; \
11678 } while (0)
11679# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11680 do { \
11681 uint8_t u8Tmp; \
11682 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11683 (a_u32Dst) = u8Tmp; \
11684 } while (0)
11685# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11686 do { \
11687 uint8_t u8Tmp; \
11688 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11689 (a_u64Dst) = u8Tmp; \
11690 } while (0)
11691# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11692 do { \
11693 uint16_t u16Tmp; \
11694 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11695 (a_u32Dst) = u16Tmp; \
11696 } while (0)
11697# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11698 do { \
11699 uint16_t u16Tmp; \
11700 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11701 (a_u64Dst) = u16Tmp; \
11702 } while (0)
11703# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11704 do { \
11705 uint32_t u32Tmp; \
11706 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11707 (a_u64Dst) = u32Tmp; \
11708 } while (0)
11709#else /* IEM_WITH_SETJMP */
11710# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11711 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11712# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11713 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11714# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11715 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11716# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11717 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11718# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11719 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11720# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11721 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11722#endif /* IEM_WITH_SETJMP */
11723
11724#ifndef IEM_WITH_SETJMP
11725# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11726 do { \
11727 uint8_t u8Tmp; \
11728 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11729 (a_u16Dst) = (int8_t)u8Tmp; \
11730 } while (0)
11731# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11732 do { \
11733 uint8_t u8Tmp; \
11734 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11735 (a_u32Dst) = (int8_t)u8Tmp; \
11736 } while (0)
11737# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11738 do { \
11739 uint8_t u8Tmp; \
11740 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11741 (a_u64Dst) = (int8_t)u8Tmp; \
11742 } while (0)
11743# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11744 do { \
11745 uint16_t u16Tmp; \
11746 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11747 (a_u32Dst) = (int16_t)u16Tmp; \
11748 } while (0)
11749# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11750 do { \
11751 uint16_t u16Tmp; \
11752 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11753 (a_u64Dst) = (int16_t)u16Tmp; \
11754 } while (0)
11755# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11756 do { \
11757 uint32_t u32Tmp; \
11758 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11759 (a_u64Dst) = (int32_t)u32Tmp; \
11760 } while (0)
11761#else /* IEM_WITH_SETJMP */
11762# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11763 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11764# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11765 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11766# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11767 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11768# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11769 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11770# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11771 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11772# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11773 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11774#endif /* IEM_WITH_SETJMP */
11775
11776#ifndef IEM_WITH_SETJMP
11777# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11778 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11779# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11780 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11781# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11782 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11783# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11784 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11785#else
11786# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11787 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11788# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11789 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11790# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11791 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11792# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11793 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11794#endif
11795
11796#ifndef IEM_WITH_SETJMP
11797# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11798 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11799# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11800 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11801# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11802 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11803# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11804 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11805#else
11806# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11807 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11808# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11809 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11810# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11811 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11812# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11813 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11814#endif
11815
11816#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11817#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11818#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11819#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11820#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11821#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11822#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11823 do { \
11824 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11825 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11826 } while (0)
11827
11828#ifndef IEM_WITH_SETJMP
11829# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11830 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11831# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11832 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11833#else
11834# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11835 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11836# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11837 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11838#endif
11839
11840#ifndef IEM_WITH_SETJMP
11841# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11842 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11843# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11844 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11845#else
11846# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11847 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11848# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11849 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11850#endif
11851
11852
11853#define IEM_MC_PUSH_U16(a_u16Value) \
11854 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11855#define IEM_MC_PUSH_U32(a_u32Value) \
11856 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11857#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11858 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11859#define IEM_MC_PUSH_U64(a_u64Value) \
11860 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11861
11862#define IEM_MC_POP_U16(a_pu16Value) \
11863 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11864#define IEM_MC_POP_U32(a_pu32Value) \
11865 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11866#define IEM_MC_POP_U64(a_pu64Value) \
11867 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11868
11869/** Maps guest memory for direct or bounce buffered access.
11870 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11871 * @remarks May return.
11872 */
11873#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11874 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11875
11876/** Maps guest memory for direct or bounce buffered access.
11877 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11878 * @remarks May return.
11879 */
11880#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11881 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11882
11883/** Commits the memory and unmaps the guest memory.
11884 * @remarks May return.
11885 */
11886#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11887 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11888
11889/** Commits the memory and unmaps the guest memory, unless the FPU status word
11890 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
11891 * that would prevent the store (FST and friends) from taking place.
11892 *
11893 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11894 * store, while \#P will not.
11895 *
11896 * @remarks May in theory return - for now.
11897 */
11898#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11899 do { \
11900 if ( !(a_u16FSW & X86_FSW_ES) \
11901 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11902 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11903 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11904 } while (0)
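/* Illustrative sketch (not part of this file): a typical FST/FSTP-style store
 * maps the destination, lets the assembly worker produce the value and a new
 * FSW, and only commits when the FSW permits it.  The local/argument names and
 * the worker name are assumptions for the example, not definitions from here.
 *
 *     IEM_MC_MEM_MAP(pr32Dst, IEM_ACCESS_DATA_W, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1 /*arg*/);
 *     IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r32, pu16Fsw, pr32Dst, pr80Value);
 *     IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr32Dst, IEM_ACCESS_DATA_W, u16Fsw);
 *     IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
 *
 * The plain IEM_MC_MEM_COMMIT_AND_UNMAP above is the unconditional variant used
 * by ordinary read-modify-write operands.
 */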
11905
11906/** Calculate effective address from R/M. */
11907#ifndef IEM_WITH_SETJMP
11908# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11909 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11910#else
11911# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11912 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11913#endif
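/* Illustrative sketch (assumed usage, not from this file): a memory-operand
 * decoder first computes the effective address and then accesses memory
 * through it; IEM_MC_LOCAL and IEM_MC_FETCH_MEM_U16 are defined elsewhere in
 * this file.
 *
 *     IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0 /*cbImm*/);
 *     IEM_MC_FETCH_MEM_U16(u16Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 */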
11914
11915#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11916#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11917#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11918#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11919#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11920#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11921#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11922
11923/**
11924 * Defers the rest of the instruction emulation to a C implementation routine
11925 * and returns, only taking the standard parameters.
11926 *
11927 * @param a_pfnCImpl The pointer to the C routine.
11928 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11929 */
11930#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11931
11932/**
11933 * Defers the rest of instruction emulation to a C implementation routine and
11934 * returns, taking one argument in addition to the standard ones.
11935 *
11936 * @param a_pfnCImpl The pointer to the C routine.
11937 * @param a0 The argument.
11938 */
11939#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11940
11941/**
11942 * Defers the rest of the instruction emulation to a C implementation routine
11943 * and returns, taking two arguments in addition to the standard ones.
11944 *
11945 * @param a_pfnCImpl The pointer to the C routine.
11946 * @param a0 The first extra argument.
11947 * @param a1 The second extra argument.
11948 */
11949#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11950
11951/**
11952 * Defers the rest of the instruction emulation to a C implementation routine
11953 * and returns, taking three arguments in addition to the standard ones.
11954 *
11955 * @param a_pfnCImpl The pointer to the C routine.
11956 * @param a0 The first extra argument.
11957 * @param a1 The second extra argument.
11958 * @param a2 The third extra argument.
11959 */
11960#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11961
11962/**
11963 * Defers the rest of the instruction emulation to a C implementation routine
11964 * and returns, taking four arguments in addition to the standard ones.
11965 *
11966 * @param a_pfnCImpl The pointer to the C routine.
11967 * @param a0 The first extra argument.
11968 * @param a1 The second extra argument.
11969 * @param a2 The third extra argument.
11970 * @param a3 The fourth extra argument.
11971 */
11972#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11973
11974/**
11975 * Defers the rest of the instruction emulation to a C implementation routine
11976 * and returns, taking five arguments in addition to the standard ones.
11977 *
11978 * @param a_pfnCImpl The pointer to the C routine.
11979 * @param a0 The first extra argument.
11980 * @param a1 The second extra argument.
11981 * @param a2 The third extra argument.
11982 * @param a3 The fourth extra argument.
11983 * @param a4 The fifth extra argument.
11984 */
11985#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
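/* Illustrative sketch (assumed usage): inside an IEM_MC_BEGIN/IEM_MC_END block
 * the decoder hands the rest of the work to a C implementation; since the
 * macro returns, it has to be the last statement of the block.  The worker and
 * argument names below are hypothetical placeholders.
 *
 *     IEM_MC_BEGIN(2, 0);
 *     IEM_MC_ARG(uint16_t, u16Sel,  0);
 *     IEM_MC_ARG(uint64_t, u64Addr, 1);
 *     ... fetch the two arguments ...
 *     IEM_MC_CALL_CIMPL_2(iemCImpl_myWorker, u16Sel, u64Addr);
 *     IEM_MC_END();
 */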
11986
11987/**
11988 * Defers the entire instruction emulation to a C implementation routine and
11989 * returns, only taking the standard parameters.
11990 *
11991 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11992 *
11993 * @param a_pfnCImpl The pointer to the C routine.
11994 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11995 */
11996#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11997
11998/**
11999 * Defers the entire instruction emulation to a C implementation routine and
12000 * returns, taking one argument in addition to the standard ones.
12001 *
12002 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12003 *
12004 * @param a_pfnCImpl The pointer to the C routine.
12005 * @param a0 The argument.
12006 */
12007#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12008
12009/**
12010 * Defers the entire instruction emulation to a C implementation routine and
12011 * returns, taking two arguments in addition to the standard ones.
12012 *
12013 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12014 *
12015 * @param a_pfnCImpl The pointer to the C routine.
12016 * @param a0 The first extra argument.
12017 * @param a1 The second extra argument.
12018 */
12019#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12020
12021/**
12022 * Defers the entire instruction emulation to a C implementation routine and
12023 * returns, taking three arguments in addition to the standard ones.
12024 *
12025 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12026 *
12027 * @param a_pfnCImpl The pointer to the C routine.
12028 * @param a0 The first extra argument.
12029 * @param a1 The second extra argument.
12030 * @param a2 The third extra argument.
12031 */
12032#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
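/* Illustrative sketch (assumed usage): when the whole instruction lives in a
 * C routine, the decoder body is just the deferral, with no IEM_MC_BEGIN or
 * IEM_MC_END around it.  The opcode and worker names are examples only.
 *
 *     FNIEMOP_DEF(iemOp_hlt)
 *     {
 *         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 *     }
 */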
12033
12034/**
12035 * Calls an FPU assembly implementation taking one visible argument.
12036 *
12037 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12038 * @param a0 The first extra argument.
12039 */
12040#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12041 do { \
12042 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12043 } while (0)
12044
12045/**
12046 * Calls an FPU assembly implementation taking two visible arguments.
12047 *
12048 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12049 * @param a0 The first extra argument.
12050 * @param a1 The second extra argument.
12051 */
12052#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12053 do { \
12054 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12055 } while (0)
12056
12057/**
12058 * Calls an FPU assembly implementation taking three visible arguments.
12059 *
12060 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12061 * @param a0 The first extra argument.
12062 * @param a1 The second extra argument.
12063 * @param a2 The third extra argument.
12064 */
12065#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12066 do { \
12067 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12068 } while (0)
12069
12070#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12071 do { \
12072 (a_FpuData).FSW = (a_FSW); \
12073 (a_FpuData).r80Result = *(a_pr80Value); \
12074 } while (0)
12075
12076/** Pushes FPU result onto the stack. */
12077#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12078 iemFpuPushResult(pVCpu, &a_FpuData)
12079/** Pushes FPU result onto the stack and sets the FPUDP. */
12080#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12081 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12082
12083/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
12084#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12085 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12086
12087/** Stores FPU result in a stack register. */
12088#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12089 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12090/** Stores FPU result in a stack register and pops the stack. */
12091#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12092 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12093/** Stores FPU result in a stack register and sets the FPUDP. */
12094#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12095 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12096/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12097 * stack. */
12098#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12099 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
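/* Illustrative sketch (assumed usage): a register-only FPU arithmetic
 * instruction prepares the FPU, calls the assembly worker into an
 * IEMFPURESULT local, and stores the result back into ST0, falling back to
 * the underflow path if a source register is empty.  The local/argument names
 * and the worker name are examples only.
 *
 *     IEM_MC_PREPARE_FPU_USAGE();
 *     IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, 1)
 *         IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *         IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *     IEM_MC_ELSE()
 *         IEM_MC_FPU_STACK_UNDERFLOW(0);
 *     IEM_MC_ENDIF();
 */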
12100
12101/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12102#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12103 iemFpuUpdateOpcodeAndIp(pVCpu)
12104/** Free a stack register (for FFREE and FFREEP). */
12105#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12106 iemFpuStackFree(pVCpu, a_iStReg)
12107/** Increment the FPU stack pointer. */
12108#define IEM_MC_FPU_STACK_INC_TOP() \
12109 iemFpuStackIncTop(pVCpu)
12110/** Decrement the FPU stack pointer. */
12111#define IEM_MC_FPU_STACK_DEC_TOP() \
12112 iemFpuStackDecTop(pVCpu)
12113
12114/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12115#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12116 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12117/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12118#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12119 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12120/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12121#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12122 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12123/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12124#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12125 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12126/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12127 * stack. */
12128#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12129 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12130/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12131#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12132 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12133
12134/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12135#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12136 iemFpuStackUnderflow(pVCpu, a_iStDst)
12137/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12138 * stack. */
12139#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12140 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12141/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12142 * FPUDS. */
12143#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12144 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12145/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12146 * FPUDS. Pops stack. */
12147#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12148 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12149/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12150 * stack twice. */
12151#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12152 iemFpuStackUnderflowThenPopPop(pVCpu)
12153/** Raises a FPU stack underflow exception for an instruction pushing a result
12154 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12155#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12156 iemFpuStackPushUnderflow(pVCpu)
12157/** Raises a FPU stack underflow exception for an instruction pushing a result
12158 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12159#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12160 iemFpuStackPushUnderflowTwo(pVCpu)
12161
12162/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12163 * FPUIP, FPUCS and FOP. */
12164#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12165 iemFpuStackPushOverflow(pVCpu)
12166/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12167 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12168#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12169 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12170/** Prepares for using the FPU state.
12171 * Ensures that we can use the host FPU in the current context (RC+R0).
12172 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12173#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12174/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
12175#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12176/** Actualizes the guest FPU state so it can be accessed and modified. */
12177#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12178
12179/** Prepares for using the SSE state.
12180 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12181 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12182#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12183/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12184#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12185/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12186#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12187
12188/** Prepares for using the AVX state.
12189 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12190 * Ensures the guest AVX state in the CPUMCTX is up to date.
12191 * @note This will include the AVX512 state too when support for it is added
12192 * due to the zero extending feature of VEX instructions. */
12193#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12194/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12195#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12196/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12197#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12198
12199/**
12200 * Calls an MMX assembly implementation taking two visible arguments.
12201 *
12202 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12203 * @param a0 The first extra argument.
12204 * @param a1 The second extra argument.
12205 */
12206#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12207 do { \
12208 IEM_MC_PREPARE_FPU_USAGE(); \
12209 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12210 } while (0)
12211
12212/**
12213 * Calls an MMX assembly implementation taking three visible arguments.
12214 *
12215 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12216 * @param a0 The first extra argument.
12217 * @param a1 The second extra argument.
12218 * @param a2 The third extra argument.
12219 */
12220#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12221 do { \
12222 IEM_MC_PREPARE_FPU_USAGE(); \
12223 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12224 } while (0)
12225
12226
12227/**
12228 * Calls an SSE assembly implementation taking two visible arguments.
12229 *
12230 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12231 * @param a0 The first extra argument.
12232 * @param a1 The second extra argument.
12233 */
12234#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12235 do { \
12236 IEM_MC_PREPARE_SSE_USAGE(); \
12237 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12238 } while (0)
12239
12240/**
12241 * Calls an SSE assembly implementation taking three visible arguments.
12242 *
12243 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12244 * @param a0 The first extra argument.
12245 * @param a1 The second extra argument.
12246 * @param a2 The third extra argument.
12247 */
12248#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12249 do { \
12250 IEM_MC_PREPARE_SSE_USAGE(); \
12251 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12252 } while (0)
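/* Illustrative sketch (assumed usage): a typical SSE binary operation takes
 * references to the destination and source XMM registers and invokes the
 * worker; IEM_MC_REF_XREG_U128* are defined elsewhere in this file, and the
 * register-index variables are placeholders for values decoded from ModR/M.
 *
 *     IEM_MC_PREPARE_SSE_USAGE();
 *     IEM_MC_REF_XREG_U128(pDst, iXRegDst);
 *     IEM_MC_REF_XREG_U128_CONST(pSrc, iXRegSrc);
 *     IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pxor_u128, pDst, pSrc);
 */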
12253
12254
12255/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12256 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12257#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12258 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12259
12260/**
12261 * Calls an AVX assembly implementation taking two visible arguments.
12262 *
12263 * There is one implicit zeroth argument, a pointer to the extended state.
12264 *
12265 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12266 * @param a1 The first extra argument.
12267 * @param a2 The second extra argument.
12268 */
12269#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12270 do { \
12271 IEM_MC_PREPARE_AVX_USAGE(); \
12272 a_pfnAImpl(pXState, (a1), (a2)); \
12273 } while (0)
12274
12275/**
12276 * Calls an AVX assembly implementation taking three visible arguments.
12277 *
12278 * There is one implicit zeroth argument, a pointer to the extended state.
12279 *
12280 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12281 * @param a1 The first extra argument.
12282 * @param a2 The second extra argument.
12283 * @param a3 The third extra argument.
12284 */
12285#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12286 do { \
12287 IEM_MC_PREPARE_AVX_USAGE(); \
12288 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12289 } while (0)
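/* Illustrative sketch (assumed usage): the implicit zeroth argument is
 * declared with IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() and picked up as pXState by
 * the call macro, which also prepares the AVX state itself.  The worker name
 * and the register-index values below are hypothetical placeholders.
 *
 *     IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *     IEM_MC_ARG_CONST(uint8_t, iYRegDst, bDstValue, 1);   // bDstValue: decoded elsewhere
 *     IEM_MC_ARG_CONST(uint8_t, iYRegSrc, bSrcValue, 2);   // bSrcValue: decoded elsewhere
 *     IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_vexample_u256, iYRegDst, iYRegSrc);
 */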
12290
12291/** @note Not for IOPL or IF testing. */
12292#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12293/** @note Not for IOPL or IF testing. */
12294#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12295/** @note Not for IOPL or IF testing. */
12296#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12297/** @note Not for IOPL or IF testing. */
12298#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12299/** @note Not for IOPL or IF testing. */
12300#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12301 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12302 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12303/** @note Not for IOPL or IF testing. */
12304#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12305 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12306 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12307/** @note Not for IOPL or IF testing. */
12308#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12309 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12310 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12311 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12312/** @note Not for IOPL or IF testing. */
12313#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12314 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12315 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12316 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12317#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12318#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12319#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12320/** @note Not for IOPL or IF testing. */
12321#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12322 if ( pVCpu->cpum.GstCtx.cx != 0 \
12323 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12324/** @note Not for IOPL or IF testing. */
12325#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12326 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12327 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12328/** @note Not for IOPL or IF testing. */
12329#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12330 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12331 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12332/** @note Not for IOPL or IF testing. */
12333#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12334 if ( pVCpu->cpum.GstCtx.cx != 0 \
12335 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12336/** @note Not for IOPL or IF testing. */
12337#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12338 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12339 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12340/** @note Not for IOPL or IF testing. */
12341#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12342 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12343 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12344#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12345#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12346
12347#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12348 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12349#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12350 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12351#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12352 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12353#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12354 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12355#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12356 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12357#define IEM_MC_IF_FCW_IM() \
12358 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12359
12360#define IEM_MC_ELSE() } else {
12361#define IEM_MC_ENDIF() } do {} while (0)
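/* Illustrative sketch (assumed usage): the IEM_MC_IF_* macros above open a
 * block (hence the unbalanced '{') which IEM_MC_ELSE/IEM_MC_ENDIF continue and
 * close, giving CMOVcc-style decoders a natural shape.  The register index and
 * local names are examples only; the GREG macros are defined elsewhere in this
 * file.
 *
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *         IEM_MC_FETCH_GREG_U32(u32Tmp, iGRegSrc);
 *         IEM_MC_STORE_GREG_U32(iGRegDst, u32Tmp);
 *     IEM_MC_ELSE()
 *         IEM_MC_CLEAR_HIGH_GREG_U64(iGRegDst);
 *     IEM_MC_ENDIF();
 */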
12362
12363/** @} */
12364
12365
12366/** @name Opcode Debug Helpers.
12367 * @{
12368 */
12369#ifdef VBOX_WITH_STATISTICS
12370# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12371#else
12372# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12373#endif
12374
12375#ifdef DEBUG
12376# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12377 do { \
12378 IEMOP_INC_STATS(a_Stats); \
12379 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12380 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12381 } while (0)
12382
12383# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12384 do { \
12385 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12386 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12387 (void)RT_CONCAT(OP_,a_Upper); \
12388 (void)(a_fDisHints); \
12389 (void)(a_fIemHints); \
12390 } while (0)
12391
12392# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12393 do { \
12394 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12395 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12396 (void)RT_CONCAT(OP_,a_Upper); \
12397 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12398 (void)(a_fDisHints); \
12399 (void)(a_fIemHints); \
12400 } while (0)
12401
12402# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12403 do { \
12404 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12405 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12406 (void)RT_CONCAT(OP_,a_Upper); \
12407 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12408 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12409 (void)(a_fDisHints); \
12410 (void)(a_fIemHints); \
12411 } while (0)
12412
12413# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12414 do { \
12415 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12416 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12417 (void)RT_CONCAT(OP_,a_Upper); \
12418 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12419 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12420 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12421 (void)(a_fDisHints); \
12422 (void)(a_fIemHints); \
12423 } while (0)
12424
12425# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12426 do { \
12427 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12428 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12429 (void)RT_CONCAT(OP_,a_Upper); \
12430 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12431 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12432 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12433 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12434 (void)(a_fDisHints); \
12435 (void)(a_fIemHints); \
12436 } while (0)
12437
12438#else
12439# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12440
12441# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12442 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12443# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12444 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12445# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12446 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12447# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12448 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12449# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12450 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12451
12452#endif
12453
12454#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12455 IEMOP_MNEMONIC0EX(a_Lower, \
12456 #a_Lower, \
12457 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12458#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12459 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12460 #a_Lower " " #a_Op1, \
12461 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12462#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12463 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12464 #a_Lower " " #a_Op1 "," #a_Op2, \
12465 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12466#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12467 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12468 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12469 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12470#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12471 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12472 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12473 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
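/* Illustrative sketch (assumed usage): a decoder function announces its
 * mnemonic and operand form first, which feeds both the statistics counter
 * and the debug logging above.  The opcode shown is an example only.
 *
 *     FNIEMOP_DEF(iemOp_add_Ev_Gv)
 *     {
 *         IEMOP_MNEMONIC2(MR, ADD, add, Ev, Gv, DISOPTYPE_HARMLESS, 0);
 *         ...
 *     }
 */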
12474
12475/** @} */
12476
12477
12478/** @name Opcode Helpers.
12479 * @{
12480 */
12481
12482#ifdef IN_RING3
12483# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12484 do { \
12485 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12486 else \
12487 { \
12488 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12489 return IEMOP_RAISE_INVALID_OPCODE(); \
12490 } \
12491 } while (0)
12492#else
12493# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12494 do { \
12495 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12496 else return IEMOP_RAISE_INVALID_OPCODE(); \
12497 } while (0)
12498#endif
12499
12500/** The instruction requires a 186 or later. */
12501#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12502# define IEMOP_HLP_MIN_186() do { } while (0)
12503#else
12504# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12505#endif
12506
12507/** The instruction requires a 286 or later. */
12508#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12509# define IEMOP_HLP_MIN_286() do { } while (0)
12510#else
12511# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12512#endif
12513
12514/** The instruction requires a 386 or later. */
12515#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12516# define IEMOP_HLP_MIN_386() do { } while (0)
12517#else
12518# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12519#endif
12520
12521/** The instruction requires a 386 or later if the given expression is true. */
12522#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12523# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12524#else
12525# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12526#endif
12527
12528/** The instruction requires a 486 or later. */
12529#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12530# define IEMOP_HLP_MIN_486() do { } while (0)
12531#else
12532# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12533#endif
12534
12535/** The instruction requires a Pentium (586) or later. */
12536#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12537# define IEMOP_HLP_MIN_586() do { } while (0)
12538#else
12539# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12540#endif
12541
12542/** The instruction requires a PentiumPro (686) or later. */
12543#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12544# define IEMOP_HLP_MIN_686() do { } while (0)
12545#else
12546# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12547#endif
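/* Illustrative sketch (assumed usage): a decoder for, say, a 386+ only
 * instruction simply starts with
 *
 *     IEMOP_HLP_MIN_386();
 *
 * which compiles away entirely when IEM_CFG_TARGET_CPU guarantees a new
 * enough target.
 */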
12548
12549
12550/** The instruction raises an \#UD in real and V8086 mode. */
12551#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12552 do \
12553 { \
12554 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12555 else return IEMOP_RAISE_INVALID_OPCODE(); \
12556 } while (0)
12557
12558#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12559/** This instruction raises an \#UD in real and V8086 mode, or in long mode
12560 * when not using a 64-bit code segment (applicable to all VMX instructions
12561 * except VMCALL).
12562 */
12563#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12564 do \
12565 { \
12566 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12567 && ( !IEM_IS_LONG_MODE(pVCpu) \
12568 || IEM_IS_64BIT_CODE(pVCpu))) \
12569 { /* likely */ } \
12570 else \
12571 { \
12572 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12573 { \
12574 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12575 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12576 return IEMOP_RAISE_INVALID_OPCODE(); \
12577 } \
12578 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12579 { \
12580 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12581 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12582 return IEMOP_RAISE_INVALID_OPCODE(); \
12583 } \
12584 } \
12585 } while (0)
12586
12587/** The instruction can only be executed in VMX operation (VMX root mode and
12588 * non-root mode).
12589 *
12590 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12591 */
12592# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12593 do \
12594 { \
12595 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12596 else \
12597 { \
12598 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12599 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12600 return IEMOP_RAISE_INVALID_OPCODE(); \
12601 } \
12602 } while (0)
12603#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12604
12605/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12606 * 64-bit mode. */
12607#define IEMOP_HLP_NO_64BIT() \
12608 do \
12609 { \
12610 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12611 return IEMOP_RAISE_INVALID_OPCODE(); \
12612 } while (0)
12613
12614/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12615 * 64-bit mode. */
12616#define IEMOP_HLP_ONLY_64BIT() \
12617 do \
12618 { \
12619 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12620 return IEMOP_RAISE_INVALID_OPCODE(); \
12621 } while (0)
12622
12623/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12624#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12625 do \
12626 { \
12627 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12628 iemRecalEffOpSize64Default(pVCpu); \
12629 } while (0)
12630
12631/** The instruction has 64-bit operand size if 64-bit mode. */
12632#define IEMOP_HLP_64BIT_OP_SIZE() \
12633 do \
12634 { \
12635 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12636 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12637 } while (0)
12638
12639/** Only a REX prefix immediately preceding the first opcode byte takes
12640 * effect. This macro helps ensure this as well as logging bad guest code. */
12641#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12642 do \
12643 { \
12644 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12645 { \
12646 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12647 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12648 pVCpu->iem.s.uRexB = 0; \
12649 pVCpu->iem.s.uRexIndex = 0; \
12650 pVCpu->iem.s.uRexReg = 0; \
12651 iemRecalEffOpSize(pVCpu); \
12652 } \
12653 } while (0)
12654
12655/**
12656 * Done decoding.
12657 */
12658#define IEMOP_HLP_DONE_DECODING() \
12659 do \
12660 { \
12661 /*nothing for now, maybe later... */ \
12662 } while (0)
12663
12664/**
12665 * Done decoding, raise \#UD exception if lock prefix present.
12666 */
12667#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12668 do \
12669 { \
12670 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12671 { /* likely */ } \
12672 else \
12673 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12674 } while (0)
12675
12676
12677/**
12678 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12679 * repnz or size prefixes are present, or if in real or v8086 mode.
12680 */
12681#define IEMOP_HLP_DONE_VEX_DECODING() \
12682 do \
12683 { \
12684 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12685 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12686 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12687 { /* likely */ } \
12688 else \
12689 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12690 } while (0)
12691
12692/**
12693 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12694 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L != 0.
12695 */
12696#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12697 do \
12698 { \
12699 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12700 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12701 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12702 && pVCpu->iem.s.uVexLength == 0)) \
12703 { /* likely */ } \
12704 else \
12705 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12706 } while (0)
12707
12708
12709/**
12710 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12711 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12712 * register 0, or if in real or v8086 mode.
12713 */
12714#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12715 do \
12716 { \
12717 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12718 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12719 && !pVCpu->iem.s.uVex3rdReg \
12720 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12721 { /* likely */ } \
12722 else \
12723 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12724 } while (0)
12725
12726/**
12727 * Done decoding VEX, no V, L=0.
12728 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12729 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12730 */
12731#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12732 do \
12733 { \
12734 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12735 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12736 && pVCpu->iem.s.uVexLength == 0 \
12737 && pVCpu->iem.s.uVex3rdReg == 0 \
12738 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12739 { /* likely */ } \
12740 else \
12741 return IEMOP_RAISE_INVALID_OPCODE(); \
12742 } while (0)
12743
12744#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12745 do \
12746 { \
12747 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12748 { /* likely */ } \
12749 else \
12750 { \
12751 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12752 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12753 } \
12754 } while (0)
12755#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12756 do \
12757 { \
12758 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12759 { /* likely */ } \
12760 else \
12761 { \
12762 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12763 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12764 } \
12765 } while (0)
12766
12767/**
12768 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12769 * are present.
12770 */
12771#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12772 do \
12773 { \
12774 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12775 { /* likely */ } \
12776 else \
12777 return IEMOP_RAISE_INVALID_OPCODE(); \
12778 } while (0)
12779
12780/**
12781 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12782 * prefixes are present.
12783 */
12784#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12785 do \
12786 { \
12787 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12788 { /* likely */ } \
12789 else \
12790 return IEMOP_RAISE_INVALID_OPCODE(); \
12791 } while (0)
12792
12793
12794/**
12795 * Calculates the effective address of a ModR/M memory operand.
12796 *
12797 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12798 *
12799 * @return Strict VBox status code.
12800 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12801 * @param bRm The ModRM byte.
12802 * @param cbImm The size of any immediate following the
12803 * effective address opcode bytes. Important for
12804 * RIP relative addressing.
12805 * @param pGCPtrEff Where to return the effective address.
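 *
 * @par Worked example (illustrative)
 * With 16-bit addressing, bRm=0x42 decodes as mod=1, reg=0, rm=2, i.e.
 * [BP+SI+disp8] with SS as the default segment register; for disp8=0x10,
 * BP=0x2000 and SI=0x0030 the routine returns *pGCPtrEff = 0x2040.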
12806 */
12807IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12808{
12809 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12810# define SET_SS_DEF() \
12811 do \
12812 { \
12813 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12814 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12815 } while (0)
12816
12817 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12818 {
12819/** @todo Check the effective address size crap! */
12820 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12821 {
12822 uint16_t u16EffAddr;
12823
12824 /* Handle the disp16 form with no registers first. */
12825 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12826 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12827 else
12828 {
12829                /* Get the displacement. */
12830 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12831 {
12832 case 0: u16EffAddr = 0; break;
12833 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12834 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12835 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12836 }
12837
12838 /* Add the base and index registers to the disp. */
12839 switch (bRm & X86_MODRM_RM_MASK)
12840 {
12841 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12842 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12843 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12844 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12845 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12846 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12847 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12848 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12849 }
12850 }
12851
12852 *pGCPtrEff = u16EffAddr;
12853 }
12854 else
12855 {
12856 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12857 uint32_t u32EffAddr;
12858
12859 /* Handle the disp32 form with no registers first. */
12860 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12861 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12862 else
12863 {
12864 /* Get the register (or SIB) value. */
12865 switch ((bRm & X86_MODRM_RM_MASK))
12866 {
12867 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12868 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12869 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12870 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12871 case 4: /* SIB */
12872 {
12873 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12874
12875 /* Get the index and scale it. */
12876 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12877 {
12878 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12879 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12880 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12881 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12882 case 4: u32EffAddr = 0; /*none */ break;
12883 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12884 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12885 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12886 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12887 }
12888 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12889
12890 /* add base */
12891 switch (bSib & X86_SIB_BASE_MASK)
12892 {
12893 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12894 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12895 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12896 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12897 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12898 case 5:
12899 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12900 {
12901 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
12902 SET_SS_DEF();
12903 }
12904 else
12905 {
12906 uint32_t u32Disp;
12907 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12908 u32EffAddr += u32Disp;
12909 }
12910 break;
12911 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
12912 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
12913 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12914 }
12915 break;
12916 }
12917 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
12918 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12919 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12920 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12921 }
12922
12923 /* Get and add the displacement. */
12924 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12925 {
12926 case 0:
12927 break;
12928 case 1:
12929 {
12930 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12931 u32EffAddr += i8Disp;
12932 break;
12933 }
12934 case 2:
12935 {
12936 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12937 u32EffAddr += u32Disp;
12938 break;
12939 }
12940 default:
12941 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12942 }
12943
12944 }
12945 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12946 *pGCPtrEff = u32EffAddr;
12947 else
12948 {
12949 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12950 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12951 }
12952 }
12953 }
12954 else
12955 {
12956 uint64_t u64EffAddr;
12957
12958 /* Handle the rip+disp32 form with no registers first. */
12959 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12960 {
12961 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12962 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12963 }
12964 else
12965 {
12966 /* Get the register (or SIB) value. */
12967 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12968 {
12969 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
12970 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
12971 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
12972 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
12973 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
12974 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
12975 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
12976 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
12977 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
12978 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
12979 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
12980 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
12981 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
12982 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
12983 /* SIB */
12984 case 4:
12985 case 12:
12986 {
12987 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12988
12989 /* Get the index and scale it. */
12990 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12991 {
12992 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
12993 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
12994 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
12995 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
12996 case 4: u64EffAddr = 0; /*none */ break;
12997 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
12998 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
12999 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13000 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13001 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13002 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13003 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13004 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13005 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13006 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13007 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13008 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13009 }
13010 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13011
13012 /* add base */
13013 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13014 {
13015 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13016 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13017 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13018 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13019 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13020 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13021 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13022 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13023 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13024 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13025 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13026 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13027 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13028 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13029 /* complicated encodings */
13030 case 5:
13031 case 13:
13032 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13033 {
13034 if (!pVCpu->iem.s.uRexB)
13035 {
13036 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13037 SET_SS_DEF();
13038 }
13039 else
13040 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13041 }
13042 else
13043 {
13044 uint32_t u32Disp;
13045 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13046 u64EffAddr += (int32_t)u32Disp;
13047 }
13048 break;
13049 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13050 }
13051 break;
13052 }
13053 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13054 }
13055
13056 /* Get and add the displacement. */
13057 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13058 {
13059 case 0:
13060 break;
13061 case 1:
13062 {
13063 int8_t i8Disp;
13064 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13065 u64EffAddr += i8Disp;
13066 break;
13067 }
13068 case 2:
13069 {
13070 uint32_t u32Disp;
13071 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13072 u64EffAddr += (int32_t)u32Disp;
13073 break;
13074 }
13075 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13076 }
13077
13078 }
13079
13080 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13081 *pGCPtrEff = u64EffAddr;
13082 else
13083 {
13084 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13085 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13086 }
13087 }
13088
13089 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13090 return VINF_SUCCESS;
13091}
13092
13093
13094/**
13095 * Calculates the effective address of a ModR/M memory operand.
13096 *
13097 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13098 *
13099 * @return Strict VBox status code.
13100 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13101 * @param bRm The ModRM byte.
13102 * @param cbImm The size of any immediate following the
13103 * effective address opcode bytes. Important for
13104 * RIP relative addressing.
13105 * @param pGCPtrEff Where to return the effective address.
13106 * @param offRsp RSP displacement.
13107 */
13108IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13109{
13110 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
13111# define SET_SS_DEF() \
13112 do \
13113 { \
13114 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13115 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13116 } while (0)
13117
13118 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13119 {
13120/** @todo Check the effective address size crap! */
13121 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13122 {
13123 uint16_t u16EffAddr;
13124
13125 /* Handle the disp16 form with no registers first. */
13126 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13127 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13128 else
13129 {
13130 /* Get the displacement. */
13131 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13132 {
13133 case 0: u16EffAddr = 0; break;
13134 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13135 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13136 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13137 }
13138
13139 /* Add the base and index registers to the disp. */
13140 switch (bRm & X86_MODRM_RM_MASK)
13141 {
13142 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13143 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13144 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13145 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13146 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13147 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13148 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13149 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13150 }
13151 }
13152
13153 *pGCPtrEff = u16EffAddr;
13154 }
13155 else
13156 {
13157 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13158 uint32_t u32EffAddr;
13159
13160 /* Handle the disp32 form with no registers first. */
13161 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13162 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13163 else
13164 {
13165 /* Get the register (or SIB) value. */
13166 switch ((bRm & X86_MODRM_RM_MASK))
13167 {
13168 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13169 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13170 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13171 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13172 case 4: /* SIB */
13173 {
13174 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13175
13176 /* Get the index and scale it. */
13177 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13178 {
13179 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13180 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13181 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13182 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13183 case 4: u32EffAddr = 0; /* none */ break;
13184 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13185 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13186 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13187 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13188 }
13189 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13190
13191 /* add base */
13192 switch (bSib & X86_SIB_BASE_MASK)
13193 {
13194 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13195 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13196 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13197 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13198 case 4:
13199 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13200 SET_SS_DEF();
13201 break;
13202 case 5:
13203 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13204 {
13205 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13206 SET_SS_DEF();
13207 }
13208 else
13209 {
13210 uint32_t u32Disp;
13211 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13212 u32EffAddr += u32Disp;
13213 }
13214 break;
13215 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13216 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13217 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13218 }
13219 break;
13220 }
13221 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13222 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13223 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13224 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13225 }
13226
13227 /* Get and add the displacement. */
13228 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13229 {
13230 case 0:
13231 break;
13232 case 1:
13233 {
13234 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13235 u32EffAddr += i8Disp;
13236 break;
13237 }
13238 case 2:
13239 {
13240 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13241 u32EffAddr += u32Disp;
13242 break;
13243 }
13244 default:
13245 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13246 }
13247
13248 }
13249 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13250 *pGCPtrEff = u32EffAddr;
13251 else
13252 {
13253 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13254 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13255 }
13256 }
13257 }
13258 else
13259 {
13260 uint64_t u64EffAddr;
13261
13262 /* Handle the rip+disp32 form with no registers first. */
13263 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13264 {
13265 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13266 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13267 }
13268 else
13269 {
13270 /* Get the register (or SIB) value. */
13271 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13272 {
13273 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13274 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13275 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13276 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13277 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13278 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13279 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13280 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13281 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13282 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13283 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13284 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13285 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13286 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13287 /* SIB */
13288 case 4:
13289 case 12:
13290 {
13291 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13292
13293 /* Get the index and scale it. */
13294 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13295 {
13296 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13297 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13298 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13299 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13300 case 4: u64EffAddr = 0; /* none */ break;
13301 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13302 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13303 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13304 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13305 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13306 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13307 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13308 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13309 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13310 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13311 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13312 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13313 }
13314 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13315
13316 /* add base */
13317 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13318 {
13319 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13320 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13321 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13322 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13323 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13324 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13325 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13326 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13327 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13328 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13329 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13330 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13331 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13332 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13333 /* complicated encodings */
13334 case 5:
13335 case 13:
13336 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13337 {
13338 if (!pVCpu->iem.s.uRexB)
13339 {
13340 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13341 SET_SS_DEF();
13342 }
13343 else
13344 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13345 }
13346 else
13347 {
13348 uint32_t u32Disp;
13349 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13350 u64EffAddr += (int32_t)u32Disp;
13351 }
13352 break;
13353 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13354 }
13355 break;
13356 }
13357 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13358 }
13359
13360 /* Get and add the displacement. */
13361 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13362 {
13363 case 0:
13364 break;
13365 case 1:
13366 {
13367 int8_t i8Disp;
13368 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13369 u64EffAddr += i8Disp;
13370 break;
13371 }
13372 case 2:
13373 {
13374 uint32_t u32Disp;
13375 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13376 u64EffAddr += (int32_t)u32Disp;
13377 break;
13378 }
13379 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13380 }
13381
13382 }
13383
13384 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13385 *pGCPtrEff = u64EffAddr;
13386 else
13387 {
13388 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13389 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13390 }
13391 }
13392
13393 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13394 return VINF_SUCCESS;
13395}
13396
13397
13398#ifdef IEM_WITH_SETJMP
13399/**
13400 * Calculates the effective address of a ModR/M memory operand.
13401 *
13402 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13403 *
13404 * May longjmp on internal error.
13405 *
13406 * @return The effective address.
13407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13408 * @param bRm The ModRM byte.
13409 * @param cbImm The size of any immediate following the
13410 * effective address opcode bytes. Important for
13411 * RIP relative addressing.
13412 */
13413IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm)
13414{
13415 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13416# define SET_SS_DEF() \
13417 do \
13418 { \
13419 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13420 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13421 } while (0)
13422
13423 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13424 {
13425/** @todo Check the effective address size crap! */
13426 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13427 {
13428 uint16_t u16EffAddr;
13429
13430 /* Handle the disp16 form with no registers first. */
13431 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13432 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13433 else
13434 {
13435 /* Get the displacement. */
13436 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13437 {
13438 case 0: u16EffAddr = 0; break;
13439 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13440 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13441 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13442 }
13443
13444 /* Add the base and index registers to the disp. */
13445 switch (bRm & X86_MODRM_RM_MASK)
13446 {
13447 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13448 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13449 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13450 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13451 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13452 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13453 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13454 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13455 }
13456 }
13457
13458 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13459 return u16EffAddr;
13460 }
13461
13462 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13463 uint32_t u32EffAddr;
13464
13465 /* Handle the disp32 form with no registers first. */
13466 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13467 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13468 else
13469 {
13470 /* Get the register (or SIB) value. */
13471 switch ((bRm & X86_MODRM_RM_MASK))
13472 {
13473 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13474 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13475 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13476 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13477 case 4: /* SIB */
13478 {
13479 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13480
13481 /* Get the index and scale it. */
13482 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13483 {
13484 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13485 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13486 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13487 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13488 case 4: u32EffAddr = 0; /* none */ break;
13489 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13490 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13491 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13492 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13493 }
13494 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13495
13496 /* add base */
13497 switch (bSib & X86_SIB_BASE_MASK)
13498 {
13499 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13500 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13501 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13502 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13503 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13504 case 5:
13505 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13506 {
13507 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13508 SET_SS_DEF();
13509 }
13510 else
13511 {
13512 uint32_t u32Disp;
13513 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13514 u32EffAddr += u32Disp;
13515 }
13516 break;
13517 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13518 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13519 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13520 }
13521 break;
13522 }
13523 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13524 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13525 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13526 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13527 }
13528
13529 /* Get and add the displacement. */
13530 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13531 {
13532 case 0:
13533 break;
13534 case 1:
13535 {
13536 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13537 u32EffAddr += i8Disp;
13538 break;
13539 }
13540 case 2:
13541 {
13542 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13543 u32EffAddr += u32Disp;
13544 break;
13545 }
13546 default:
13547 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13548 }
13549 }
13550
13551 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13552 {
13553 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13554 return u32EffAddr;
13555 }
13556 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13557 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13558 return u32EffAddr & UINT16_MAX;
13559 }
13560
13561 uint64_t u64EffAddr;
13562
13563 /* Handle the rip+disp32 form with no registers first. */
13564 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13565 {
13566 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13567 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13568 }
13569 else
13570 {
13571 /* Get the register (or SIB) value. */
13572 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13573 {
13574 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13575 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13576 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13577 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13578 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13579 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13580 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13581 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13582 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13583 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13584 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13585 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13586 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13587 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13588 /* SIB */
13589 case 4:
13590 case 12:
13591 {
13592 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13593
13594 /* Get the index and scale it. */
13595 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13596 {
13597 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13598 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13599 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13600 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13601 case 4: u64EffAddr = 0; /* none */ break;
13602 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13603 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13604 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13605 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13606 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13607 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13608 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13609 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13610 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13611 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13612 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13613 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13614 }
13615 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13616
13617 /* add base */
13618 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13619 {
13620 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13621 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13622 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13623 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13624 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13625 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13626 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13627 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13628 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13629 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13630 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13631 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13632 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13633 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13634 /* complicated encodings */
13635 case 5:
13636 case 13:
13637 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13638 {
13639 if (!pVCpu->iem.s.uRexB)
13640 {
13641 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13642 SET_SS_DEF();
13643 }
13644 else
13645 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13646 }
13647 else
13648 {
13649 uint32_t u32Disp;
13650 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13651 u64EffAddr += (int32_t)u32Disp;
13652 }
13653 break;
13654 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13655 }
13656 break;
13657 }
13658 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13659 }
13660
13661 /* Get and add the displacement. */
13662 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13663 {
13664 case 0:
13665 break;
13666 case 1:
13667 {
13668 int8_t i8Disp;
13669 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13670 u64EffAddr += i8Disp;
13671 break;
13672 }
13673 case 2:
13674 {
13675 uint32_t u32Disp;
13676 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13677 u64EffAddr += (int32_t)u32Disp;
13678 break;
13679 }
13680 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13681 }
13682
13683 }
13684
13685 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13686 {
13687 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13688 return u64EffAddr;
13689 }
13690 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13691 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13692 return u64EffAddr & UINT32_MAX;
13693}
13694#endif /* IEM_WITH_SETJMP */
13695
13696/** @} */
13697
13698
13699
13700/*
13701 * Include the instructions
13702 */
13703#include "IEMAllInstructions.cpp.h"
13704
13705
13706
13707#ifdef LOG_ENABLED
13708/**
13709 * Logs the current instruction.
13710 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13711 * @param fSameCtx Set if we have the same context information as the VMM,
13712 * clear if we may have already executed an instruction in
13713 * our debug context. When clear, we assume IEMCPU holds
13714 * valid CPU mode info.
13717 * @param pszFunction The IEM function doing the execution.
13718 */
13719IEM_STATIC void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction)
13720{
13721# ifdef IN_RING3
13722 if (LogIs2Enabled())
13723 {
13724 char szInstr[256];
13725 uint32_t cbInstr = 0;
13726 if (fSameCtx)
13727 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13728 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13729 szInstr, sizeof(szInstr), &cbInstr);
13730 else
13731 {
13732 uint32_t fFlags = 0;
13733 switch (pVCpu->iem.s.enmCpuMode)
13734 {
13735 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13736 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13737 case IEMMODE_16BIT:
13738 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13739 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13740 else
13741 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13742 break;
13743 }
13744 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13745 szInstr, sizeof(szInstr), &cbInstr);
13746 }
13747
13748 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13749 Log2(("**** %s\n"
13750 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13751 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13752 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13753 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13754 " %s\n"
13755 , pszFunction,
13756 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13757 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13758 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13759 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13760 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13761 szInstr));
13762
13763 if (LogIs3Enabled())
13764 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13765 }
13766 else
13767# endif
13768 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13769 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13770 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13771}
13772#endif /* LOG_ENABLED */
13773
13774
13775/**
13776 * Makes status code adjustments (pass up from I/O and access handlers)
13777 * as well as maintaining statistics.
13778 *
13779 * @returns Strict VBox status code to pass up.
13780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13781 * @param rcStrict The status from executing an instruction.
13782 */
13783DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
13784{
13785 if (rcStrict != VINF_SUCCESS)
13786 {
13787 if (RT_SUCCESS(rcStrict))
13788 {
13789 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13790 || rcStrict == VINF_IOM_R3_IOPORT_READ
13791 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13792 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13793 || rcStrict == VINF_IOM_R3_MMIO_READ
13794 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13795 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13796 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13797 || rcStrict == VINF_CPUM_R3_MSR_READ
13798 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13799 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13800 || rcStrict == VINF_EM_RAW_TO_R3
13801 || rcStrict == VINF_EM_TRIPLE_FAULT
13802 || rcStrict == VINF_GIM_R3_HYPERCALL
13803 /* raw-mode / virt handlers only: */
13804 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13805 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13806 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13807 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13808 || rcStrict == VINF_SELM_SYNC_GDT
13809 || rcStrict == VINF_CSAM_PENDING_ACTION
13810 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13811 /* nested hw.virt codes: */
13812 || rcStrict == VINF_VMX_VMEXIT
13813 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
13814 || rcStrict == VINF_SVM_VMEXIT
13815 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13816/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
13817 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13818#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13819 if ( rcStrict == VINF_VMX_VMEXIT
13820 && rcPassUp == VINF_SUCCESS)
13821 rcStrict = VINF_SUCCESS;
13822 else
13823#endif
13824#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13825 if ( rcStrict == VINF_SVM_VMEXIT
13826 && rcPassUp == VINF_SUCCESS)
13827 rcStrict = VINF_SUCCESS;
13828 else
13829#endif
13830 if (rcPassUp == VINF_SUCCESS)
13831 pVCpu->iem.s.cRetInfStatuses++;
13832 else if ( rcPassUp < VINF_EM_FIRST
13833 || rcPassUp > VINF_EM_LAST
13834 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13835 {
13836 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13837 pVCpu->iem.s.cRetPassUpStatus++;
13838 rcStrict = rcPassUp;
13839 }
13840 else
13841 {
13842 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13843 pVCpu->iem.s.cRetInfStatuses++;
13844 }
13845 }
13846 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13847 pVCpu->iem.s.cRetAspectNotImplemented++;
13848 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13849 pVCpu->iem.s.cRetInstrNotImplemented++;
13850 else
13851 pVCpu->iem.s.cRetErrStatuses++;
13852 }
13853 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13854 {
13855 pVCpu->iem.s.cRetPassUpStatus++;
13856 rcStrict = pVCpu->iem.s.rcPassUp;
13857 }
13858
13859 return rcStrict;
13860}
13861
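/*
 * Usage sketch of the pattern used by the IEMExec* bodies below: the result of
 * the instruction handler is always passed through the fiddler before being
 * returned, so a pending rcPassUp can take precedence over a less important
 * informational status:
 *
 *      rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
 *      ...
 *      rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
 */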
13862
13863/**
13864 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13865 * IEMExecOneWithPrefetchedByPC.
13866 *
13867 * Similar code is found in IEMExecLots.
13868 *
13869 * @return Strict VBox status code.
13870 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13871 * @param fExecuteInhibit If set, execute the instruction following CLI,
13872 * POP SS and MOV SS,GR.
13873 * @param pszFunction The calling function name.
13874 */
13875DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
13876{
13877 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13878 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13879 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13880 RT_NOREF_PV(pszFunction);
13881
13882#ifdef IEM_WITH_SETJMP
13883 VBOXSTRICTRC rcStrict;
13884 jmp_buf JmpBuf;
13885 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13886 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13887 if ((rcStrict = setjmp(JmpBuf)) == 0)
13888 {
13889 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13890 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13891 }
13892 else
13893 pVCpu->iem.s.cLongJumps++;
13894 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13895#else
13896 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13897 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13898#endif
13899 if (rcStrict == VINF_SUCCESS)
13900 pVCpu->iem.s.cInstructions++;
13901 if (pVCpu->iem.s.cActiveMappings > 0)
13902 {
13903 Assert(rcStrict != VINF_SUCCESS);
13904 iemMemRollback(pVCpu);
13905 }
13906 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13907 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13908 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13909
13910//#ifdef DEBUG
13911// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13912//#endif
13913
13914#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13915 /*
13916 * Perform any VMX nested-guest instruction boundary actions.
13917 *
13918 * If any of these causes a VM-exit, we must skip executing the next
13919 * instruction (would run into stale page tables). A VM-exit makes sure
13920 * there is no interrupt-inhibition, so that should ensure we don't go on
13921 * to try to execute the next instruction. Clearing fExecuteInhibit is
13922 * problematic because of the setjmp/longjmp clobbering above.
13923 */
13924 if ( rcStrict == VINF_SUCCESS
13925 && CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
13926 {
13927 bool fCheckRemainingIntercepts = true;
13928 /* TPR-below threshold/APIC write has the highest priority. */
13929 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
13930 {
13931 rcStrict = iemVmxApicWriteEmulation(pVCpu);
13932 fCheckRemainingIntercepts = false;
13933 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
13934 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
13935 }
13936 /* MTF takes priority over VMX-preemption timer. */
13937 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
13938 {
13939 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
13940 fCheckRemainingIntercepts = false;
13941 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
13942 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
13943 }
13944 /* VMX preemption timer takes priority over NMI-window exits. */
13945 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
13946 {
13947 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
13948 if (rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE)
13949 rcStrict = VINF_SUCCESS;
13950 else
13951 {
13952 fCheckRemainingIntercepts = false;
13953 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
13954 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
13955 }
13956 }
13957
13958 /*
13959 * Check remaining intercepts.
13960 *
13961 * NMI-window and Interrupt-window VM-exits.
13962 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
13963 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
13964 *
13965 * See Intel spec. 26.7.6 "NMI-Window Exiting".
13966 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
13967 */
13968 if ( fCheckRemainingIntercepts
13969 && !TRPMHasTrap(pVCpu)
13970 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
13971 {
13972 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents);
13973 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
13974 && CPUMIsGuestVmxVirtNmiBlocking(pVCpu, &pVCpu->cpum.GstCtx))
13975 {
13976 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
13977 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
13978 }
13979 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
13980 && CPUMIsGuestVmxVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
13981 {
13982 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
13983 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
13984 }
13985 }
13986 }
13987#endif
13988
13989 /* Execute the next instruction as well if a cli, pop ss or
13990 mov ss, Gr has just completed successfully. */
13991 if ( fExecuteInhibit
13992 && rcStrict == VINF_SUCCESS
13993 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13994 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
13995 {
13996 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13997 if (rcStrict == VINF_SUCCESS)
13998 {
13999#ifdef LOG_ENABLED
14000 iemLogCurInstr(pVCpu, false, pszFunction);
14001#endif
14002#ifdef IEM_WITH_SETJMP
14003 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14004 if ((rcStrict = setjmp(JmpBuf)) == 0)
14005 {
14006 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14007 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14008 }
14009 else
14010 pVCpu->iem.s.cLongJumps++;
14011 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14012#else
14013 IEM_OPCODE_GET_NEXT_U8(&b);
14014 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14015#endif
14016 if (rcStrict == VINF_SUCCESS)
14017 pVCpu->iem.s.cInstructions++;
14018 if (pVCpu->iem.s.cActiveMappings > 0)
14019 {
14020 Assert(rcStrict != VINF_SUCCESS);
14021 iemMemRollback(pVCpu);
14022 }
14023 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14024 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14025 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14026 }
14027 else if (pVCpu->iem.s.cActiveMappings > 0)
14028 iemMemRollback(pVCpu);
14029 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14030 }
14031
14032 /*
14033 * Return value fiddling, statistics and sanity assertions.
14034 */
14035 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14036
14037 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14038 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14039 return rcStrict;
14040}
14041
14042
14043/**
14044 * Execute one instruction.
14045 *
14046 * @return Strict VBox status code.
14047 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14048 */
14049VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
14050{
14051#ifdef LOG_ENABLED
14052 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14053#endif
14054
14055 /*
14056 * Do the decoding and emulation.
14057 */
14058 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14059 if (rcStrict == VINF_SUCCESS)
14060 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14061 else if (pVCpu->iem.s.cActiveMappings > 0)
14062 iemMemRollback(pVCpu);
14063
14064 if (rcStrict != VINF_SUCCESS)
14065 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14066 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14067 return rcStrict;
14068}
14069
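/*
 * Caller-side sketch (assumes the calling EMT owns pVCpu; the error handling
 * shown is illustrative only):
 *
 *      VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *      if (rcStrict != VINF_SUCCESS)
 *          Log(("caller: IEMExecOne -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 */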
14070
14071VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14072{
14073 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14074
14075 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14076 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14077 if (rcStrict == VINF_SUCCESS)
14078 {
14079 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14080 if (pcbWritten)
14081 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14082 }
14083 else if (pVCpu->iem.s.cActiveMappings > 0)
14084 iemMemRollback(pVCpu);
14085
14086 return rcStrict;
14087}
14088
14089
14090VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14091 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14092{
14093 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14094
14095 VBOXSTRICTRC rcStrict;
14096 if ( cbOpcodeBytes
14097 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14098 {
14099 iemInitDecoder(pVCpu, false);
14100#ifdef IEM_WITH_CODE_TLB
14101 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14102 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14103 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14104 pVCpu->iem.s.offCurInstrStart = 0;
14105 pVCpu->iem.s.offInstrNextByte = 0;
14106#else
14107 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14108 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14109#endif
14110 rcStrict = VINF_SUCCESS;
14111 }
14112 else
14113 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14114 if (rcStrict == VINF_SUCCESS)
14115 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14116 else if (pVCpu->iem.s.cActiveMappings > 0)
14117 iemMemRollback(pVCpu);
14118
14119 return rcStrict;
14120}
14121
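/*
 * Caller-side sketch (assumes the caller already has the opcode bytes at the
 * current RIP, e.g. from exit information; abOpcode and cbOpcode are
 * hypothetical caller variables):
 *
 *      VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu,
 *                                                           CPUMCTX2CORE(IEM_GET_CTX(pVCpu)),
 *                                                           pVCpu->cpum.GstCtx.rip,
 *                                                           abOpcode, cbOpcode);
 */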
14122
14123VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14124{
14125 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14126
14127 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14128 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14129 if (rcStrict == VINF_SUCCESS)
14130 {
14131 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14132 if (pcbWritten)
14133 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14134 }
14135 else if (pVCpu->iem.s.cActiveMappings > 0)
14136 iemMemRollback(pVCpu);
14137
14138 return rcStrict;
14139}
14140
14141
14142VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14143 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14144{
14145 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14146
14147 VBOXSTRICTRC rcStrict;
14148 if ( cbOpcodeBytes
14149 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14150 {
14151 iemInitDecoder(pVCpu, true);
14152#ifdef IEM_WITH_CODE_TLB
14153 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14154 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14155 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14156 pVCpu->iem.s.offCurInstrStart = 0;
14157 pVCpu->iem.s.offInstrNextByte = 0;
14158#else
14159 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14160 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14161#endif
14162 rcStrict = VINF_SUCCESS;
14163 }
14164 else
14165 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14166 if (rcStrict == VINF_SUCCESS)
14167 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14168 else if (pVCpu->iem.s.cActiveMappings > 0)
14169 iemMemRollback(pVCpu);
14170
14171 return rcStrict;
14172}
14173
14174
14175/**
14176 * May come in handy for debugging DISGetParamSize.
14177 *
14178 * @returns Strict VBox status code.
14179 * @param pVCpu The cross context virtual CPU structure of the
14180 * calling EMT.
14181 * @param pCtxCore The context core structure.
14182 * @param OpcodeBytesPC The PC of the opcode bytes.
14183 * @param pvOpcodeBytes Prefetched opcode bytes.
14184 * @param cbOpcodeBytes Number of prefetched bytes.
14185 * @param pcbWritten Where to return the number of bytes written.
14186 * Optional.
14187 */
14188VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14189 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14190 uint32_t *pcbWritten)
14191{
14192 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14193
14194 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14195 VBOXSTRICTRC rcStrict;
14196 if ( cbOpcodeBytes
14197 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14198 {
14199 iemInitDecoder(pVCpu, true);
14200#ifdef IEM_WITH_CODE_TLB
14201 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14202 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14203 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14204 pVCpu->iem.s.offCurInstrStart = 0;
14205 pVCpu->iem.s.offInstrNextByte = 0;
14206#else
14207 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14208 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14209#endif
14210 rcStrict = VINF_SUCCESS;
14211 }
14212 else
14213 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14214 if (rcStrict == VINF_SUCCESS)
14215 {
14216 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14217 if (pcbWritten)
14218 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14219 }
14220 else if (pVCpu->iem.s.cActiveMappings > 0)
14221 iemMemRollback(pVCpu);
14222
14223 return rcStrict;
14224}
14225
14226
14227VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
14228{
14229 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14230 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
14231
14232 /*
14233 * See if there is an interrupt pending in TRPM, inject it if we can.
14234 */
14235 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14236#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14237 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
14238 if (fIntrEnabled)
14239 {
14240 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
14241 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14242 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14243 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14244 else
14245 {
14246 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
14247 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14248 }
14249 }
14250#else
14251 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14252#endif
14253
14254 /** @todo What if we are injecting an exception and not an interrupt? Is that
14255 * possible here? */
14256 if ( fIntrEnabled
14257 && TRPMHasTrap(pVCpu)
14258 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14259 {
14260 uint8_t u8TrapNo;
14261 TRPMEVENT enmType;
14262 RTGCUINT uErrCode;
14263 RTGCPTR uCr2;
14264 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14265 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14266 TRPMResetTrap(pVCpu);
14267#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14268 /* Injecting an event may cause a VM-exit. */
14269 if ( rcStrict != VINF_SUCCESS
14270 && rcStrict != VINF_IEM_RAISED_XCPT)
14271 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14272#else
14273 NOREF(rcStrict);
14274#endif
14275 }
14276
14277 /*
14278 * Initial decoder init w/ prefetch, then setup setjmp.
14279 */
14280 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14281 if (rcStrict == VINF_SUCCESS)
14282 {
14283#ifdef IEM_WITH_SETJMP
14284 jmp_buf JmpBuf;
14285 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14286 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14287 pVCpu->iem.s.cActiveMappings = 0;
14288 if ((rcStrict = setjmp(JmpBuf)) == 0)
14289#endif
14290 {
14291 /*
14292 * The run loop. The instruction budget is given by the caller (cMaxInstructions).
14293 */
14294 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
14295 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
14296 for (;;)
14297 {
14298 /*
14299 * Log the state.
14300 */
14301#ifdef LOG_ENABLED
14302 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14303#endif
14304
14305 /*
14306 * Do the decoding and emulation.
14307 */
14308 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14309 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14310 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14311 {
14312 Assert(pVCpu->iem.s.cActiveMappings == 0);
14313 pVCpu->iem.s.cInstructions++;
14314 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14315 {
14316 uint64_t fCpu = pVCpu->fLocalForcedActions
14317 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14318 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14319 | VMCPU_FF_TLB_FLUSH
14320 | VMCPU_FF_INHIBIT_INTERRUPTS
14321 | VMCPU_FF_BLOCK_NMIS
14322 | VMCPU_FF_UNHALT ));
14323
14324 if (RT_LIKELY( ( !fCpu
14325 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14326 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14327 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14328 {
14329 if (cMaxInstructionsGccStupidity-- > 0)
14330 {
14331 /* Poll timers every now and then according to the caller's specs. */
14332 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
14333 || !TMTimerPollBool(pVM, pVCpu))
14334 {
14335 Assert(pVCpu->iem.s.cActiveMappings == 0);
14336 iemReInitDecoder(pVCpu);
14337 continue;
14338 }
14339 }
14340 }
14341 }
14342 Assert(pVCpu->iem.s.cActiveMappings == 0);
14343 }
14344 else if (pVCpu->iem.s.cActiveMappings > 0)
14345 iemMemRollback(pVCpu);
14346 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14347 break;
14348 }
14349 }
14350#ifdef IEM_WITH_SETJMP
14351 else
14352 {
14353 if (pVCpu->iem.s.cActiveMappings > 0)
14354 iemMemRollback(pVCpu);
14355# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14356 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14357# endif
14358 pVCpu->iem.s.cLongJumps++;
14359 }
14360 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14361#endif
14362
14363 /*
14364 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14365 */
14366 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14367 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14368 }
14369 else
14370 {
14371 if (pVCpu->iem.s.cActiveMappings > 0)
14372 iemMemRollback(pVCpu);
14373
14374#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14375 /*
14376 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14377 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14378 */
14379 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14380#endif
14381 }
14382
14383 /*
14384 * Maybe re-enter raw-mode and log.
14385 */
14386 if (rcStrict != VINF_SUCCESS)
14387 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14388 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14389 if (pcInstructions)
14390 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14391 return rcStrict;
14392}
14393
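/*
 * Caller-side sketch (the numbers are illustrative; cPollRate is used as a
 * mask, so it must be a power of two minus one):
 *
 *      uint32_t     cInstructions = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096, 511, &cInstructions);
 *      Log(("caller: executed %u instructions -> %Rrc\n",
 *           cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
 */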
14394
14395/**
14396 * Interface used by EMExecuteExec, does exit statistics and limits.
14397 *
14398 * @returns Strict VBox status code.
14399 * @param pVCpu The cross context virtual CPU structure.
14400 * @param fWillExit To be defined.
14401 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14402 * @param cMaxInstructions Maximum number of instructions to execute.
14403 * @param cMaxInstructionsWithoutExits
14404 * The max number of instructions without exits.
14405 * @param pStats Where to return statistics.
14406 */
14407VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14408 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14409{
14410 NOREF(fWillExit); /** @todo define flexible exit crits */
14411
14412 /*
14413 * Initialize return stats.
14414 */
14415 pStats->cInstructions = 0;
14416 pStats->cExits = 0;
14417 pStats->cMaxExitDistance = 0;
14418 pStats->cReserved = 0;
14419
14420 /*
14421 * Initial decoder init w/ prefetch, then setup setjmp.
14422 */
14423 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14424 if (rcStrict == VINF_SUCCESS)
14425 {
14426#ifdef IEM_WITH_SETJMP
14427 jmp_buf JmpBuf;
14428 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14429 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14430 pVCpu->iem.s.cActiveMappings = 0;
14431 if ((rcStrict = setjmp(JmpBuf)) == 0)
14432#endif
14433 {
14434#ifdef IN_RING0
14435 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14436#endif
14437 uint32_t cInstructionSinceLastExit = 0;
14438
14439 /*
14440 * The run loop. The caller supplies the instruction limits (cMaxInstructions and cMaxInstructionsWithoutExits).
14441 */
14442 PVM pVM = pVCpu->CTX_SUFF(pVM);
14443 for (;;)
14444 {
14445 /*
14446 * Log the state.
14447 */
14448#ifdef LOG_ENABLED
14449 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14450#endif
14451
14452 /*
14453 * Do the decoding and emulation.
14454 */
14455 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14456
14457 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14458 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14459
14460 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14461 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14462 {
14463 pStats->cExits += 1;
14464 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14465 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14466 cInstructionSinceLastExit = 0;
14467 }
14468
14469 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14470 {
14471 Assert(pVCpu->iem.s.cActiveMappings == 0);
14472 pVCpu->iem.s.cInstructions++;
14473 pStats->cInstructions++;
14474 cInstructionSinceLastExit++;
14475 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14476 {
14477 uint64_t fCpu = pVCpu->fLocalForcedActions
14478 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14479 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14480 | VMCPU_FF_TLB_FLUSH
14481 | VMCPU_FF_INHIBIT_INTERRUPTS
14482 | VMCPU_FF_BLOCK_NMIS
14483 | VMCPU_FF_UNHALT ));
14484
14485 if (RT_LIKELY( ( ( !fCpu
14486 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14487 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14488 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14489 || pStats->cInstructions < cMinInstructions))
14490 {
14491 if (pStats->cInstructions < cMaxInstructions)
14492 {
14493 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14494 {
14495#ifdef IN_RING0
14496 if ( !fCheckPreemptionPending
14497 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14498#endif
14499 {
14500 Assert(pVCpu->iem.s.cActiveMappings == 0);
14501 iemReInitDecoder(pVCpu);
14502 continue;
14503 }
14504#ifdef IN_RING0
14505 rcStrict = VINF_EM_RAW_INTERRUPT;
14506 break;
14507#endif
14508 }
14509 }
14510 }
14511 Assert(!(fCpu & VMCPU_FF_IEM));
14512 }
14513 Assert(pVCpu->iem.s.cActiveMappings == 0);
14514 }
14515 else if (pVCpu->iem.s.cActiveMappings > 0)
14516 iemMemRollback(pVCpu);
14517 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14518 break;
14519 }
14520 }
14521#ifdef IEM_WITH_SETJMP
14522 else
14523 {
14524 if (pVCpu->iem.s.cActiveMappings > 0)
14525 iemMemRollback(pVCpu);
14526 pVCpu->iem.s.cLongJumps++;
14527 }
14528 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14529#endif
14530
14531 /*
14532 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14533 */
14534 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14535 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14536 }
14537 else
14538 {
14539 if (pVCpu->iem.s.cActiveMappings > 0)
14540 iemMemRollback(pVCpu);
14541
14542#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14543 /*
14544 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14545 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14546 */
14547 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14548#endif
14549 }
14550
14551 /*
14552 * Maybe re-enter raw-mode and log.
14553 */
14554 if (rcStrict != VINF_SUCCESS)
14555 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14556 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14557 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14558 return rcStrict;
14559}
14560
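/*
 * Caller-side sketch (the limits are illustrative; fWillExit is currently
 * unused, see the todo above):
 *
 *      IEMEXECFOREXITSTATS Stats;
 *      VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0, 32, 4096, 2048, &Stats);
 *      Log(("caller: %u instructions, %u exits, max exit distance %u -> %Rrc\n",
 *           Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance,
 *           VBOXSTRICTRC_VAL(rcStrict)));
 */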
14561
14562/**
14563 * Injects a trap, fault, abort, software interrupt or external interrupt.
14564 *
14565 * The parameter list matches TRPMQueryTrapAll pretty closely.
14566 *
14567 * @returns Strict VBox status code.
14568 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14569 * @param u8TrapNo The trap number.
14570 * @param enmType What type is it (trap/fault/abort), software
14571 * interrupt or hardware interrupt.
14572 * @param uErrCode The error code if applicable.
14573 * @param uCr2 The CR2 value if applicable.
14574 * @param cbInstr The instruction length (only relevant for
14575 * software interrupts).
14576 */
14577VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14578 uint8_t cbInstr)
14579{
14580 iemInitDecoder(pVCpu, false);
14581#ifdef DBGFTRACE_ENABLED
14582 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14583 u8TrapNo, enmType, uErrCode, uCr2);
14584#endif
14585
14586 uint32_t fFlags;
14587 switch (enmType)
14588 {
14589 case TRPM_HARDWARE_INT:
14590 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14591 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14592 uErrCode = uCr2 = 0;
14593 break;
14594
14595 case TRPM_SOFTWARE_INT:
14596 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14597 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14598 uErrCode = uCr2 = 0;
14599 break;
14600
14601 case TRPM_TRAP:
14602 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14603 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14604 if (u8TrapNo == X86_XCPT_PF)
14605 fFlags |= IEM_XCPT_FLAGS_CR2;
14606 switch (u8TrapNo)
14607 {
14608 case X86_XCPT_DF:
14609 case X86_XCPT_TS:
14610 case X86_XCPT_NP:
14611 case X86_XCPT_SS:
14612 case X86_XCPT_PF:
14613 case X86_XCPT_AC:
14614 fFlags |= IEM_XCPT_FLAGS_ERR;
14615 break;
14616 }
14617 break;
14618
14619 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14620 }
14621
14622 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14623
14624 if (pVCpu->iem.s.cActiveMappings > 0)
14625 iemMemRollback(pVCpu);
14626
14627 return rcStrict;
14628}
14629
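/*
 * Caller-side sketch: injecting an external hardware interrupt (the vector is
 * illustrative; error code, CR2 and instruction length are ignored for
 * TRPM_HARDWARE_INT, as the switch above shows):
 *
 *      VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, 0x20, TRPM_HARDWARE_INT,
 *                                            0, 0, 0);
 */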
14630
14631/**
14632 * Injects the active TRPM event.
14633 *
14634 * @returns Strict VBox status code.
14635 * @param pVCpu The cross context virtual CPU structure.
14636 */
14637VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
14638{
14639#ifndef IEM_IMPLEMENTS_TASKSWITCH
14640 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14641#else
14642 uint8_t u8TrapNo;
14643 TRPMEVENT enmType;
14644 RTGCUINT uErrCode;
14645 RTGCUINTPTR uCr2;
14646 uint8_t cbInstr;
14647 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14648 if (RT_FAILURE(rc))
14649 return rc;
14650
14651 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14652#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14653 if (rcStrict == VINF_SVM_VMEXIT)
14654 rcStrict = VINF_SUCCESS;
14655#endif
14656#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14657 if (rcStrict == VINF_VMX_VMEXIT)
14658 rcStrict = VINF_SUCCESS;
14659#endif
14660 /** @todo Are there any other codes that imply the event was successfully
14661 * delivered to the guest? See @bugref{6607}. */
14662 if ( rcStrict == VINF_SUCCESS
14663 || rcStrict == VINF_IEM_RAISED_XCPT)
14664 TRPMResetTrap(pVCpu);
14665
14666 return rcStrict;
14667#endif
14668}
14669
14670
14671VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14672{
14673 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14674 return VERR_NOT_IMPLEMENTED;
14675}
14676
14677
14678VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14679{
14680 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14681 return VERR_NOT_IMPLEMENTED;
14682}
14683
14684
14685#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14686/**
14687 * Executes a IRET instruction with default operand size.
14688 *
14689 * This is for PATM.
14690 *
14691 * @returns VBox status code.
14692 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14693 * @param pCtxCore The register frame.
14694 */
14695VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
14696{
14697 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14698
14699 iemCtxCoreToCtx(pCtx, pCtxCore);
14700 iemInitDecoder(pVCpu);
14701 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14702 if (rcStrict == VINF_SUCCESS)
14703 iemCtxToCtxCore(pCtxCore, pCtx);
14704 else
14705 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14706 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14707 return rcStrict;
14708}
14709#endif
14710
14711
14712/**
14713 * Macro used by the IEMExec* method to check the given instruction length.
14714 *
14715 * Will return on failure!
14716 *
14717 * @param a_cbInstr The given instruction length.
14718 * @param a_cbMin The minimum length.
14719 */
14720#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14721 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14722 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14723
14724
14725/**
14726 * Calls iemUninitExec and iemExecStatusCodeFiddling.
14727 *
14728 * (The raw-mode only iemRCRawMaybeReenter step the name refers to is no longer performed.)
14729 *
14730 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14731 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14732 * @param rcStrict The status code to fiddle.
14733 */
14734DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
14735{
14736 iemUninitExec(pVCpu);
14737 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14738}
14739
14740
14741/**
14742 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14743 *
14744 * This API ASSUMES that the caller has already verified that the guest code is
14745 * allowed to access the I/O port. (The I/O port is in the DX register in the
14746 * guest state.)
14747 *
14748 * @returns Strict VBox status code.
14749 * @param pVCpu The cross context virtual CPU structure.
14750 * @param cbValue The size of the I/O port access (1, 2, or 4).
14751 * @param enmAddrMode The addressing mode.
14752 * @param fRepPrefix Indicates whether a repeat prefix is used
14753 * (doesn't matter which for this instruction).
14754 * @param cbInstr The instruction length in bytes.
14755 * @param iEffSeg The effective segment register number (X86_SREG_XXX).
14756 * @param fIoChecked Whether the access to the I/O port has been
14757 * checked or not. It's typically checked in the
14758 * HM scenario.
14759 */
14760VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14761 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14762{
14763 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14764 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14765
14766 /*
14767 * State init.
14768 */
14769 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14770
14771 /*
14772 * Switch orgy for getting to the right handler.
14773 */
14774 VBOXSTRICTRC rcStrict;
14775 if (fRepPrefix)
14776 {
14777 switch (enmAddrMode)
14778 {
14779 case IEMMODE_16BIT:
14780 switch (cbValue)
14781 {
14782 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14783 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14784 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14785 default:
14786 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14787 }
14788 break;
14789
14790 case IEMMODE_32BIT:
14791 switch (cbValue)
14792 {
14793 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14794 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14795 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14796 default:
14797 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14798 }
14799 break;
14800
14801 case IEMMODE_64BIT:
14802 switch (cbValue)
14803 {
14804 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14805 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14806 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14807 default:
14808 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14809 }
14810 break;
14811
14812 default:
14813 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14814 }
14815 }
14816 else
14817 {
14818 switch (enmAddrMode)
14819 {
14820 case IEMMODE_16BIT:
14821 switch (cbValue)
14822 {
14823 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14824 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14825 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14826 default:
14827 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14828 }
14829 break;
14830
14831 case IEMMODE_32BIT:
14832 switch (cbValue)
14833 {
14834 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14835 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14836 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14837 default:
14838 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14839 }
14840 break;
14841
14842 case IEMMODE_64BIT:
14843 switch (cbValue)
14844 {
14845 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14846 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14847 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14848 default:
14849 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14850 }
14851 break;
14852
14853 default:
14854 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14855 }
14856 }
14857
14858 if (pVCpu->iem.s.cActiveMappings)
14859 iemMemRollback(pVCpu);
14860
14861 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14862}
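
/**
 * Usage sketch (illustrative, not called by anything in this file): lets IEM
 * finish a "rep outsb" the guest was interrupted in.  The helper name, the
 * 2 byte length (F3 6E with no other prefixes) and the DS default segment are
 * assumptions for the example; real callers take all of these from the exit
 * information provided by HM.
 */
DECLINLINE(VBOXSTRICTRC) iemExampleRepOutsbExit(PVMCPUCC pVCpu)
{
    /* Byte-sized accesses, 16-bit addressing, REP prefix, DS as the effective
       segment, and the I/O port access already checked by the caller. */
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_16BIT, true /*fRepPrefix*/,
                                2 /*cbInstr*/, X86_SREG_DS, true /*fIoChecked*/);
}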
14863
14864
14865/**
14866 * Interface for HM and EM for executing string I/O IN (read) instructions.
14867 *
14868 * This API ASSUMES that the caller has already verified that the guest code is
14869 * allowed to access the I/O port. (The I/O port is in the DX register in the
14870 * guest state.)
14871 *
14872 * @returns Strict VBox status code.
14873 * @param pVCpu The cross context virtual CPU structure.
14874 * @param cbValue The size of the I/O port access (1, 2, or 4).
14875 * @param enmAddrMode The addressing mode.
14876 * @param fRepPrefix Indicates whether a repeat prefix is used
14877 * (doesn't matter which for this instruction).
14878 * @param cbInstr The instruction length in bytes.
14879 * @param fIoChecked Whether the access to the I/O port has been
14880 * checked or not. It's typically checked in the
14881 * HM scenario.
14882 */
14883VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14884 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14885{
14886 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14887
14888 /*
14889 * State init.
14890 */
14891 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14892
14893 /*
14894 * Switch orgy for getting to the right handler.
14895 */
14896 VBOXSTRICTRC rcStrict;
14897 if (fRepPrefix)
14898 {
14899 switch (enmAddrMode)
14900 {
14901 case IEMMODE_16BIT:
14902 switch (cbValue)
14903 {
14904 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14905 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14906 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14907 default:
14908 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14909 }
14910 break;
14911
14912 case IEMMODE_32BIT:
14913 switch (cbValue)
14914 {
14915 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14916 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14917 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14918 default:
14919 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14920 }
14921 break;
14922
14923 case IEMMODE_64BIT:
14924 switch (cbValue)
14925 {
14926 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14927 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14928 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14929 default:
14930 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14931 }
14932 break;
14933
14934 default:
14935 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14936 }
14937 }
14938 else
14939 {
14940 switch (enmAddrMode)
14941 {
14942 case IEMMODE_16BIT:
14943 switch (cbValue)
14944 {
14945 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14946 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14947 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14948 default:
14949 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14950 }
14951 break;
14952
14953 case IEMMODE_32BIT:
14954 switch (cbValue)
14955 {
14956 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14957 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14958 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14959 default:
14960 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14961 }
14962 break;
14963
14964 case IEMMODE_64BIT:
14965 switch (cbValue)
14966 {
14967 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14968 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14969 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14970 default:
14971 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14972 }
14973 break;
14974
14975 default:
14976 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14977 }
14978 }
14979
14980 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
14981 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14982}
14983
14984
14985/**
14986 * Interface for rawmode to execute an OUT instruction.
14987 *
14988 * @returns Strict VBox status code.
14989 * @param pVCpu The cross context virtual CPU structure.
14990 * @param cbInstr The instruction length in bytes.
14991 * @param u16Port The port to write to.
14992 * @param fImm Whether the port is specified using an immediate operand or
14993 * using the implicit DX register.
14994 * @param cbReg The register size.
14995 *
14996 * @remarks In ring-0 not all of the state needs to be synced in.
14997 */
14998VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
14999{
15000 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15001 Assert(cbReg <= 4 && cbReg != 3);
15002
15003 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15004 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15005 Assert(!pVCpu->iem.s.cActiveMappings);
15006 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15007}
15008
15009
15010/**
15011 * Interface for rawmode to execute an IN instruction.
15012 *
15013 * @returns Strict VBox status code.
15014 * @param pVCpu The cross context virtual CPU structure.
15015 * @param cbInstr The instruction length in bytes.
15016 * @param u16Port The port to read from.
15017 * @param fImm Whether the port is specified using an immediate operand or
15018 * using the implicit DX register.
15019 * @param cbReg The register size.
15020 */
15021VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15022{
15023 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15024 Assert(cbReg <= 4 && cbReg != 3);
15025
15026 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15027 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15028 Assert(!pVCpu->iem.s.cActiveMappings);
15029 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15030}
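
/**
 * Usage sketch (illustrative, not called by anything in this file): replays a
 * decoded "out dx, al" followed by an "in al, 42h" after I/O intercepts.  The
 * helper name, the port/immediate values and the instruction lengths (1 byte
 * for EE, 2 bytes for E4 ib) are assumptions for the example.
 */
DECLINLINE(VBOXSTRICTRC) iemExamplePortIo(PVMCPUCC pVCpu, uint16_t u16Port)
{
    /* OUT DX, AL: the port comes from DX (fImm=false), byte-sized access. */
    VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, u16Port, false /*fImm*/, 1 /*cbReg*/);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* IN AL, imm8: the port is encoded as an immediate operand (fImm=true). */
    return IEMExecDecodedIn(pVCpu, 2 /*cbInstr*/, 0x42, true /*fImm*/, 1 /*cbReg*/);
}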
15031
15032
15033/**
15034 * Interface for HM and EM to write to a CRx register.
15035 *
15036 * @returns Strict VBox status code.
15037 * @param pVCpu The cross context virtual CPU structure.
15038 * @param cbInstr The instruction length in bytes.
15039 * @param iCrReg The control register number (destination).
15040 * @param iGReg The general purpose register number (source).
15041 *
15042 * @remarks In ring-0 not all of the state needs to be synced in.
15043 */
15044VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15045{
15046 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15047 Assert(iCrReg < 16);
15048 Assert(iGReg < 16);
15049
15050 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15051 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15052 Assert(!pVCpu->iem.s.cActiveMappings);
15053 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15054}
15055
15056
15057/**
15058 * Interface for HM and EM to read from a CRx register.
15059 *
15060 * @returns Strict VBox status code.
15061 * @param pVCpu The cross context virtual CPU structure.
15062 * @param cbInstr The instruction length in bytes.
15063 * @param iGReg The general purpose register number (destination).
15064 * @param iCrReg The control register number (source).
15065 *
15066 * @remarks In ring-0 not all of the state needs to be synced in.
15067 */
15068VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15069{
15070 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15071 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15072 | CPUMCTX_EXTRN_APIC_TPR);
15073 Assert(iCrReg < 16);
15074 Assert(iGReg < 16);
15075
15076 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15077 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15078 Assert(!pVCpu->iem.s.cActiveMappings);
15079 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15080}
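
/**
 * Usage sketch (illustrative, not called by anything in this file): emulates
 * the "mov cr4, rax" / "mov rax, cr4" pair a CRx intercept handler might see.
 * The helper name and the 3 byte length (0F 22 /r resp. 0F 20 /r without
 * prefixes) are assumptions for the example; real callers decode these from
 * the exit information.
 */
DECLINLINE(VBOXSTRICTRC) iemExampleMovCr4(PVMCPUCC pVCpu, bool fWrite)
{
    if (fWrite)    /* CR4 <- RAX (general register index 0 is xAX). */
        return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 4 /*iCrReg*/, 0 /*iGReg*/);
    /* RAX <- CR4 */
    return IEMExecDecodedMovCRxRead(pVCpu, 3 /*cbInstr*/, 0 /*iGReg*/, 4 /*iCrReg*/);
}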
15081
15082
15083/**
15084 * Interface for HM and EM to clear the CR0[TS] bit.
15085 *
15086 * @returns Strict VBox status code.
15087 * @param pVCpu The cross context virtual CPU structure.
15088 * @param cbInstr The instruction length in bytes.
15089 *
15090 * @remarks In ring-0 not all of the state needs to be synced in.
15091 */
15092VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
15093{
15094 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15095
15096 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15097 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15098 Assert(!pVCpu->iem.s.cActiveMappings);
15099 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15100}
15101
15102
15103/**
15104 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15105 *
15106 * @returns Strict VBox status code.
15107 * @param pVCpu The cross context virtual CPU structure.
15108 * @param cbInstr The instruction length in bytes.
15109 * @param uValue The value to load into CR0.
15110 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15111 * memory operand. Otherwise pass NIL_RTGCPTR.
15112 *
15113 * @remarks In ring-0 not all of the state needs to be synced in.
15114 */
15115VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15116{
15117 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15118
15119 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15120 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15121 Assert(!pVCpu->iem.s.cActiveMappings);
15122 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15123}
15124
15125
15126/**
15127 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15128 *
15129 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15130 *
15131 * @returns Strict VBox status code.
15132 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15133 * @param cbInstr The instruction length in bytes.
15134 * @remarks In ring-0 not all of the state needs to be synced in.
15135 * @thread EMT(pVCpu)
15136 */
15137VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
15138{
15139 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15140
15141 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15142 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15143 Assert(!pVCpu->iem.s.cActiveMappings);
15144 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15145}
15146
15147
15148/**
15149 * Interface for HM and EM to emulate the WBINVD instruction.
15150 *
15151 * @returns Strict VBox status code.
15152 * @param pVCpu The cross context virtual CPU structure.
15153 * @param cbInstr The instruction length in bytes.
15154 *
15155 * @remarks In ring-0 not all of the state needs to be synced in.
15156 */
15157VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
15158{
15159 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15160
15161 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15162 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15163 Assert(!pVCpu->iem.s.cActiveMappings);
15164 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15165}
15166
15167
15168/**
15169 * Interface for HM and EM to emulate the INVD instruction.
15170 *
15171 * @returns Strict VBox status code.
15172 * @param pVCpu The cross context virtual CPU structure.
15173 * @param cbInstr The instruction length in bytes.
15174 *
15175 * @remarks In ring-0 not all of the state needs to be synced in.
15176 */
15177VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
15178{
15179 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15180
15181 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15182 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15183 Assert(!pVCpu->iem.s.cActiveMappings);
15184 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15185}
15186
15187
15188/**
15189 * Interface for HM and EM to emulate the INVLPG instruction.
15190 *
15191 * @returns Strict VBox status code.
15192 * @retval VINF_PGM_SYNC_CR3
15193 *
15194 * @param pVCpu The cross context virtual CPU structure.
15195 * @param cbInstr The instruction length in bytes.
15196 * @param GCPtrPage The effective address of the page to invalidate.
15197 *
15198 * @remarks In ring-0 not all of the state needs to be synced in.
15199 */
15200VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15201{
15202 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15203
15204 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15205 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15206 Assert(!pVCpu->iem.s.cActiveMappings);
15207 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15208}
15209
15210
15211/**
15212 * Interface for HM and EM to emulate the CPUID instruction.
15213 *
15214 * @returns Strict VBox status code.
15215 *
15216 * @param pVCpu The cross context virtual CPU structure.
15217 * @param cbInstr The instruction length in bytes.
15218 *
15219 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
15220 */
15221VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
15222{
15223 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15224 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15225
15226 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15227 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15228 Assert(!pVCpu->iem.s.cActiveMappings);
15229 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15230}
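
/**
 * Usage sketch (illustrative, not called by anything in this file): a CPUID
 * intercept simply hands the instruction to IEM; CPUID is always 2 bytes
 * (0F A2).  The helper name is an assumption for the example.
 */
DECLINLINE(VBOXSTRICTRC) iemExampleCpuidExit(PVMCPUCC pVCpu)
{
    return IEMExecDecodedCpuid(pVCpu, 2 /*cbInstr*/);
}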
15231
15232
15233/**
15234 * Interface for HM and EM to emulate the RDPMC instruction.
15235 *
15236 * @returns Strict VBox status code.
15237 *
15238 * @param pVCpu The cross context virtual CPU structure.
15239 * @param cbInstr The instruction length in bytes.
15240 *
15241 * @remarks Not all of the state needs to be synced in.
15242 */
15243VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
15244{
15245 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15246 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15247
15248 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15249 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15250 Assert(!pVCpu->iem.s.cActiveMappings);
15251 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15252}
15253
15254
15255/**
15256 * Interface for HM and EM to emulate the RDTSC instruction.
15257 *
15258 * @returns Strict VBox status code.
15259 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15260 *
15261 * @param pVCpu The cross context virtual CPU structure.
15262 * @param cbInstr The instruction length in bytes.
15263 *
15264 * @remarks Not all of the state needs to be synced in.
15265 */
15266VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
15267{
15268 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15269 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15270
15271 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15272 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15273 Assert(!pVCpu->iem.s.cActiveMappings);
15274 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15275}
15276
15277
15278/**
15279 * Interface for HM and EM to emulate the RDTSCP instruction.
15280 *
15281 * @returns Strict VBox status code.
15282 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15283 *
15284 * @param pVCpu The cross context virtual CPU structure.
15285 * @param cbInstr The instruction length in bytes.
15286 *
15287 * @remarks Not all of the state needs to be synced in. It is recommended
15288 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15289 */
15290VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
15291{
15292 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15293 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15294
15295 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15296 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15297 Assert(!pVCpu->iem.s.cActiveMappings);
15298 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15299}
15300
15301
15302/**
15303 * Interface for HM and EM to emulate the RDMSR instruction.
15304 *
15305 * @returns Strict VBox status code.
15306 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15307 *
15308 * @param pVCpu The cross context virtual CPU structure.
15309 * @param cbInstr The instruction length in bytes.
15310 *
15311 * @remarks Not all of the state needs to be synced in. Requires RCX and
15312 * (currently) all MSRs.
15313 */
15314VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
15315{
15316 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15317 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15318
15319 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15320 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15321 Assert(!pVCpu->iem.s.cActiveMappings);
15322 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15323}
15324
15325
15326/**
15327 * Interface for HM and EM to emulate the WRMSR instruction.
15328 *
15329 * @returns Strict VBox status code.
15330 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15331 *
15332 * @param pVCpu The cross context virtual CPU structure.
15333 * @param cbInstr The instruction length in bytes.
15334 *
15335 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15336 * and (currently) all MSRs.
15337 */
15338VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
15339{
15340 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15341 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15342 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15343
15344 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15345 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15346 Assert(!pVCpu->iem.s.cActiveMappings);
15347 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15348}
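
/**
 * Usage sketch (illustrative, not called by anything in this file): a combined
 * RDMSR/WRMSR intercept handler.  Both instructions are 2 bytes (0F 32 and
 * 0F 30); RCX, and for WRMSR also RDX:RAX, must already be in the CPU context
 * as the remarks above require.  The helper name is an assumption for the
 * example.
 */
DECLINLINE(VBOXSTRICTRC) iemExampleMsrExit(PVMCPUCC pVCpu, bool fWrite)
{
    if (fWrite)
        return IEMExecDecodedWrmsr(pVCpu, 2 /*cbInstr*/);
    return IEMExecDecodedRdmsr(pVCpu, 2 /*cbInstr*/);
}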
15349
15350
15351/**
15352 * Interface for HM and EM to emulate the MONITOR instruction.
15353 *
15354 * @returns Strict VBox status code.
15355 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15356 *
15357 * @param pVCpu The cross context virtual CPU structure.
15358 * @param cbInstr The instruction length in bytes.
15359 *
15360 * @remarks Not all of the state needs to be synced in.
15361 * @remarks ASSUMES the default DS segment and that no segment override prefixes
15362 * are used.
15363 */
15364VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
15365{
15366 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15367 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15368
15369 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15370 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15371 Assert(!pVCpu->iem.s.cActiveMappings);
15372 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15373}
15374
15375
15376/**
15377 * Interface for HM and EM to emulate the MWAIT instruction.
15378 *
15379 * @returns Strict VBox status code.
15380 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15381 *
15382 * @param pVCpu The cross context virtual CPU structure.
15383 * @param cbInstr The instruction length in bytes.
15384 *
15385 * @remarks Not all of the state needs to be synced in.
15386 */
15387VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
15388{
15389 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15390 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
15391
15392 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15393 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15394 Assert(!pVCpu->iem.s.cActiveMappings);
15395 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15396}
15397
15398
15399/**
15400 * Interface for HM and EM to emulate the HLT instruction.
15401 *
15402 * @returns Strict VBox status code.
15403 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15404 *
15405 * @param pVCpu The cross context virtual CPU structure.
15406 * @param cbInstr The instruction length in bytes.
15407 *
15408 * @remarks Not all of the state needs to be synced in.
15409 */
15410VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
15411{
15412 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15413
15414 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15415 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15416 Assert(!pVCpu->iem.s.cActiveMappings);
15417 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15418}
15419
15420
15421/**
15422 * Checks if IEM is in the process of delivering an event (interrupt or
15423 * exception).
15424 *
15425 * @returns true if we're in the process of raising an interrupt or exception,
15426 * false otherwise.
15427 * @param pVCpu The cross context virtual CPU structure.
15428 * @param puVector Where to store the vector associated with the
15429 * currently delivered event, optional.
15430 * @param pfFlags Where to store the event delivery flags (see
15431 * IEM_XCPT_FLAGS_XXX), optional.
15432 * @param puErr Where to store the error code associated with the
15433 * event, optional.
15434 * @param puCr2 Where to store the CR2 associated with the event,
15435 * optional.
15436 * @remarks The caller should check the flags to determine if the error code and
15437 * CR2 are valid for the event.
15438 */
15439VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15440{
15441 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15442 if (fRaisingXcpt)
15443 {
15444 if (puVector)
15445 *puVector = pVCpu->iem.s.uCurXcpt;
15446 if (pfFlags)
15447 *pfFlags = pVCpu->iem.s.fCurXcpt;
15448 if (puErr)
15449 *puErr = pVCpu->iem.s.uCurXcptErr;
15450 if (puCr2)
15451 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15452 }
15453 return fRaisingXcpt;
15454}
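
/**
 * Usage sketch (illustrative, not called by anything in this file): queries
 * whether IEM is in the middle of delivering an event, e.g. before deciding
 * to inject something else.  The helper name is an assumption for the example.
 */
DECLINLINE(bool) iemExampleLogCurrentXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags;
    uint32_t uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        /* Only trust uErr/uCr2 when the corresponding flags say they are valid. */
        Log(("Delivering vector %#x fFlags=%#x%s%s\n", uVector, fFlags,
             (fFlags & IEM_XCPT_FLAGS_ERR) ? " w/ err" : "",
             (fFlags & IEM_XCPT_FLAGS_CR2) ? " w/ cr2" : ""));
        RT_NOREF2(uErr, uCr2);
        return true;
    }
    return false;
}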
15455
15456#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15457
15458/**
15459 * Interface for HM and EM to emulate the CLGI instruction.
15460 *
15461 * @returns Strict VBox status code.
15462 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15463 * @param cbInstr The instruction length in bytes.
15464 * @thread EMT(pVCpu)
15465 */
15466VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPUCC pVCpu, uint8_t cbInstr)
15467{
15468 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15469
15470 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15471 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15472 Assert(!pVCpu->iem.s.cActiveMappings);
15473 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15474}
15475
15476
15477/**
15478 * Interface for HM and EM to emulate the STGI instruction.
15479 *
15480 * @returns Strict VBox status code.
15481 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15482 * @param cbInstr The instruction length in bytes.
15483 * @thread EMT(pVCpu)
15484 */
15485VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPUCC pVCpu, uint8_t cbInstr)
15486{
15487 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15488
15489 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15490 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15491 Assert(!pVCpu->iem.s.cActiveMappings);
15492 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15493}
15494
15495
15496/**
15497 * Interface for HM and EM to emulate the VMLOAD instruction.
15498 *
15499 * @returns Strict VBox status code.
15500 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15501 * @param cbInstr The instruction length in bytes.
15502 * @thread EMT(pVCpu)
15503 */
15504VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPUCC pVCpu, uint8_t cbInstr)
15505{
15506 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15507
15508 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15509 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15510 Assert(!pVCpu->iem.s.cActiveMappings);
15511 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15512}
15513
15514
15515/**
15516 * Interface for HM and EM to emulate the VMSAVE instruction.
15517 *
15518 * @returns Strict VBox status code.
15519 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15520 * @param cbInstr The instruction length in bytes.
15521 * @thread EMT(pVCpu)
15522 */
15523VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPUCC pVCpu, uint8_t cbInstr)
15524{
15525 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15526
15527 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15528 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15529 Assert(!pVCpu->iem.s.cActiveMappings);
15530 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15531}
15532
15533
15534/**
15535 * Interface for HM and EM to emulate the INVLPGA instruction.
15536 *
15537 * @returns Strict VBox status code.
15538 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15539 * @param cbInstr The instruction length in bytes.
15540 * @thread EMT(pVCpu)
15541 */
15542VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPUCC pVCpu, uint8_t cbInstr)
15543{
15544 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15545
15546 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15547 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15548 Assert(!pVCpu->iem.s.cActiveMappings);
15549 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15550}
15551
15552
15553/**
15554 * Interface for HM and EM to emulate the VMRUN instruction.
15555 *
15556 * @returns Strict VBox status code.
15557 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15558 * @param cbInstr The instruction length in bytes.
15559 * @thread EMT(pVCpu)
15560 */
15561VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPUCC pVCpu, uint8_t cbInstr)
15562{
15563 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15564 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15565
15566 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15567 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15568 Assert(!pVCpu->iem.s.cActiveMappings);
15569 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15570}
15571
15572
15573/**
15574 * Interface for HM and EM to emulate \#VMEXIT.
15575 *
15576 * @returns Strict VBox status code.
15577 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15578 * @param uExitCode The exit code.
15579 * @param uExitInfo1 The exit info. 1 field.
15580 * @param uExitInfo2 The exit info. 2 field.
15581 * @thread EMT(pVCpu)
15582 */
15583VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15584{
15585 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15586 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15587 if (pVCpu->iem.s.cActiveMappings)
15588 iemMemRollback(pVCpu);
15589 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15590}
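
/**
 * Usage sketch (illustrative, not called by anything in this file): forces a
 * nested-guest \#VMEXIT for an intercepted CPUID.  0x72 is the architectural
 * exit code for CPUID (usually available as SVM_EXIT_CPUID in the VBox
 * headers), and CPUID supplies no additional exit information, so both info
 * fields can stay zero.  The helper name is an assumption for the example.
 */
DECLINLINE(VBOXSTRICTRC) iemExampleSvmCpuidVmexit(PVMCPUCC pVCpu)
{
    return IEMExecSvmVmexit(pVCpu, 0x72 /*SVM_EXIT_CPUID*/, 0 /*uExitInfo1*/, 0 /*uExitInfo2*/);
}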
15591
15592#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15593
15594#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15595
15596/**
15597 * Interface for HM and EM to read a VMCS field from the nested-guest VMCS.
15598 *
15599 * It is ASSUMED the caller knows what they're doing. No VMREAD instruction checks
15600 * are performed. Bounds checks are done in strict builds only.
15601 *
15602 * @param pVmcs Pointer to the virtual VMCS.
15603 * @param u64VmcsField The VMCS field.
15604 * @param pu64Dst Where to store the VMCS value.
15605 *
15606 * @remarks May be called with interrupts disabled.
15607 * @todo This should probably be moved to CPUM someday.
15608 */
15609VMM_INT_DECL(void) IEMReadVmxVmcsField(PCVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t *pu64Dst)
15610{
15611 AssertPtr(pVmcs);
15612 AssertPtr(pu64Dst);
15613 iemVmxVmreadNoCheck(pVmcs, pu64Dst, u64VmcsField);
15614}
15615
15616
15617/**
15618 * Interface for HM and EM to write a VMCS field in the nested-guest VMCS.
15619 *
15620 * It is ASSUMED the caller knows what they're doing. No VMWRITE instruction checks
15621 * are performed. Bounds checks are done in strict builds only.
15622 *
15623 * @param pVmcs Pointer to the virtual VMCS.
15624 * @param u64VmcsField The VMCS field.
15625 * @param u64Val The value to write.
15626 *
15627 * @remarks May be called with interrupts disabled.
15628 * @todo This should probably be moved to CPUM someday.
15629 */
15630VMM_INT_DECL(void) IEMWriteVmxVmcsField(PVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t u64Val)
15631{
15632 AssertPtr(pVmcs);
15633 iemVmxVmwriteNoCheck(pVmcs, u64Val, u64VmcsField);
15634}
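
/**
 * Usage sketch (illustrative, not called by anything in this file): reads and
 * bumps the guest RIP field of a virtual VMCS by its architectural encoding
 * 0x681e (commonly available as VMX_VMCS_GUEST_RIP in the VBox headers).  The
 * helper name is an assumption for the example.
 */
DECLINLINE(void) iemExampleAdvanceVmcsGuestRip(PVMXVVMCS pVmcs, uint8_t cbInstr)
{
    uint64_t u64GuestRip;
    IEMReadVmxVmcsField(pVmcs, 0x681e /*guest RIP*/, &u64GuestRip);
    IEMWriteVmxVmcsField(pVmcs, 0x681e /*guest RIP*/, u64GuestRip + cbInstr);
}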
15635
15636
15637/**
15638 * Interface for HM and EM to virtualize x2APIC MSR accesses.
15639 *
15640 * @returns Strict VBox status code.
15641 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
15642 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
15643 * the x2APIC device.
15644 * @retval VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
15645 *
15646 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15647 * @param idMsr The MSR being read.
15648 * @param pu64Value Pointer to the value being written or where to store the
15649 * value being read.
15650 * @param fWrite Whether this is an MSR write or read access.
15651 * @thread EMT(pVCpu)
15652 */
15653VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
15654{
15655 Assert(pu64Value);
15656
15657 VBOXSTRICTRC rcStrict;
15658 if (fWrite)
15659 rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
15660 else
15661 rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
15662 Assert(!pVCpu->iem.s.cActiveMappings);
15663 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15664
15665}
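
/**
 * Usage sketch (illustrative, not called by anything in this file):
 * virtualizes a guest write to the x2APIC TPR MSR (architecturally 0x808).
 * A VINF_VMX_INTERCEPT_NOT_ACTIVE return tells the caller to forward the
 * access to the x2APIC device instead, and a failure status means \#GP(0)
 * must be raised, as per the return-value notes above.  The helper name is an
 * assumption for the example.
 */
DECLINLINE(VBOXSTRICTRC) iemExampleVirtX2ApicTprWrite(PVMCPUCC pVCpu, uint64_t u64Tpr)
{
    return IEMExecVmxVirtApicAccessMsr(pVCpu, 0x808 /*x2APIC TPR*/, &u64Tpr, true /*fWrite*/);
}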
15666
15667
15668/**
15669 * Interface for HM and EM to virtualize memory-mapped APIC accesses.
15670 *
15671 * @returns Strict VBox status code.
15672 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
15673 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
15674 *
15675 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15676 * @param pExitInfo Pointer to the VM-exit information.
15677 * @param pExitEventInfo Pointer to the VM-exit event information.
15678 * @thread EMT(pVCpu)
15679 */
15680VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicAccess(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15681{
15682 Assert(pExitInfo);
15683 Assert(pExitEventInfo);
15684 VBOXSTRICTRC rcStrict = iemVmxVmexitApicAccessWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15685 Assert(!pVCpu->iem.s.cActiveMappings);
15686 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15687
15688}
15689
15690
15691/**
15692 * Interface for HM and EM to perform an APIC-write emulation which may cause a
15693 * VM-exit.
15694 *
15695 * @returns Strict VBox status code.
15696 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15697 * @thread EMT(pVCpu)
15698 */
15699VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPUCC pVCpu)
15700{
15701 VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
15702 Assert(!pVCpu->iem.s.cActiveMappings);
15703 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15704}
15705
15706
15707/**
15708 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
15709 *
15710 * @returns Strict VBox status code.
15711 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15712 * @thread EMT(pVCpu)
15713 */
15714VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPUCC pVCpu)
15715{
15716 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
15717 Assert(!pVCpu->iem.s.cActiveMappings);
15718 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15719}
15720
15721
15722/**
15723 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15724 *
15725 * @returns Strict VBox status code.
15726 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15727 * @param uVector The external interrupt vector (pass 0 if the external
15728 * interrupt is still pending).
15729 * @param fIntPending Whether the external interrupt is pending or
15730 * acknowledged in the interrupt controller.
15731 * @thread EMT(pVCpu)
15732 */
15733VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPUCC pVCpu, uint8_t uVector, bool fIntPending)
15734{
15735 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15736 Assert(!pVCpu->iem.s.cActiveMappings);
15737 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15738}
15739
15740
15741/**
15742 * Interface for HM and EM to emulate VM-exit due to exceptions.
15743 *
15744 * Exceptions here include NMIs, software exceptions (those generated by INT3 or
15745 * INTO), and privileged software exceptions (those generated by INT1/ICEBP).
15746 *
15747 * @returns Strict VBox status code.
15748 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15749 * @param pExitInfo Pointer to the VM-exit information.
15750 * @param pExitEventInfo Pointer to the VM-exit event information.
15751 * @thread EMT(pVCpu)
15752 */
15753VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcpt(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15754{
15755 Assert(pExitInfo);
15756 Assert(pExitEventInfo);
15757 Assert(pExitInfo->uReason == VMX_EXIT_XCPT_OR_NMI);
15758 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15759 Assert(!pVCpu->iem.s.cActiveMappings);
15760 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15761}
15762
15763
15764/**
15765 * Interface for HM and EM to emulate VM-exit due to NMIs.
15766 *
15767 * @returns Strict VBox status code.
15768 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15769 * @thread EMT(pVCpu)
15770 */
15771VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcptNmi(PVMCPUCC pVCpu)
15772{
15773 VMXVEXITINFO ExitInfo;
15774 RT_ZERO(ExitInfo);
15775 VMXVEXITEVENTINFO ExitEventInfo;
15776 RT_ZERO(ExitEventInfo);
15777 ExitEventInfo.uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1)
15778 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_NMI)
15779 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, X86_XCPT_NMI);
15780
15781 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, &ExitInfo, &ExitEventInfo);
15782 Assert(!pVCpu->iem.s.cActiveMappings);
15783 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15784}
15785
15786
15787/**
15788 * Interface for HM and EM to emulate VM-exit due to a triple-fault.
15789 *
15790 * @returns Strict VBox status code.
15791 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15792 * @thread EMT(pVCpu)
15793 */
15794VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTripleFault(PVMCPUCC pVCpu)
15795{
15796 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
15797 Assert(!pVCpu->iem.s.cActiveMappings);
15798 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15799}
15800
15801
15802/**
15803 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
15804 *
15805 * @returns Strict VBox status code.
15806 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15807 * @param uVector The SIPI vector.
15808 * @thread EMT(pVCpu)
15809 */
15810VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPUCC pVCpu, uint8_t uVector)
15811{
15812 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_SIPI, uVector);
15813 Assert(!pVCpu->iem.s.cActiveMappings);
15814 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15815}
15816
15817
15818/**
15819 * Interface for HM and EM to emulate a VM-exit.
15820 *
15821 * If a specialized version of a VM-exit handler exists, that must be used instead.
15822 *
15823 * @returns Strict VBox status code.
15824 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15825 * @param uExitReason The VM-exit reason.
15826 * @param u64ExitQual The Exit qualification.
15827 * @thread EMT(pVCpu)
15828 */
15829VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual)
15830{
15831 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, uExitReason, u64ExitQual);
15832 Assert(!pVCpu->iem.s.cActiveMappings);
15833 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15834}
15835
15836
15837/**
15838 * Interface for HM and EM to emulate a VM-exit due to an instruction.
15839 *
15840 * This is meant to be used for those instructions for which VMX provides
15841 * additional decoding information beyond just the instruction length.
15842 *
15843 * @returns Strict VBox status code.
15844 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15845 * @param pExitInfo Pointer to the VM-exit information.
15846 * @thread EMT(pVCpu)
15847 */
15848VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstrWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
15849{
15850 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
15851 Assert(!pVCpu->iem.s.cActiveMappings);
15852 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15853}
15854
15855
15856/**
15857 * Interface for HM and EM to emulate a VM-exit due to an instruction.
15858 *
15859 * This is meant to be used for those instructions for which VMX provides only
15860 * the instruction length.
15861 *
15862 * @returns Strict VBox status code.
15863 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15864 * @param uExitReason The VM-exit reason.
15865 * @param cbInstr The instruction length in bytes.
15866 * @thread EMT(pVCpu)
15867 */
15868VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr)
15869{
15870 VBOXSTRICTRC rcStrict = iemVmxVmexitInstr(pVCpu, uExitReason, cbInstr);
15871 Assert(!pVCpu->iem.s.cActiveMappings);
15872 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15873}
15874
15875
15876/**
15877 * Interface for HM and EM to emulate a VM-exit due to a task switch.
15878 *
15879 * @returns Strict VBox status code.
15880 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15881 * @param pExitInfo Pointer to the VM-exit information.
15882 * @param pExitEventInfo Pointer to the VM-exit event information.
15883 * @thread EMT(pVCpu)
15884 */
15885VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTaskSwitch(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15886{
15887 Assert(pExitInfo);
15888 Assert(pExitEventInfo);
15889 Assert(pExitInfo->uReason == VMX_EXIT_TASK_SWITCH);
15890 VBOXSTRICTRC rcStrict = iemVmxVmexitTaskSwitchWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15891 Assert(!pVCpu->iem.s.cActiveMappings);
15892 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15893}
15894
15895
15896/**
15897 * Interface for HM and EM to emulate the VMREAD instruction.
15898 *
15899 * @returns Strict VBox status code.
15900 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15901 * @param pExitInfo Pointer to the VM-exit information.
15902 * @thread EMT(pVCpu)
15903 */
15904VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
15905{
15906 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15907 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15908 Assert(pExitInfo);
15909
15910 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15911
15912 VBOXSTRICTRC rcStrict;
15913 uint8_t const cbInstr = pExitInfo->cbInstr;
15914 bool const fIs64BitMode = RT_BOOL(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
15915 uint64_t const u64FieldEnc = fIs64BitMode
15916 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
15917 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15918 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15919 {
15920 if (fIs64BitMode)
15921 {
15922 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15923 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
15924 }
15925 else
15926 {
15927 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15928 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u64FieldEnc, pExitInfo);
15929 }
15930 }
15931 else
15932 {
15933 RTGCPTR const GCPtrDst = pExitInfo->GCPtrEffAddr;
15934 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15935 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u64FieldEnc, pExitInfo);
15936 }
15937 Assert(!pVCpu->iem.s.cActiveMappings);
15938 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15939}
15940
15941
15942/**
15943 * Interface for HM and EM to emulate the VMWRITE instruction.
15944 *
15945 * @returns Strict VBox status code.
15946 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15947 * @param pExitInfo Pointer to the VM-exit information.
15948 * @thread EMT(pVCpu)
15949 */
15950VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
15951{
15952 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15953 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15954 Assert(pExitInfo);
15955
15956 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15957
15958 uint64_t u64Val;
15959 uint8_t iEffSeg;
15960 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15961 {
15962 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15963 iEffSeg = UINT8_MAX;
15964 }
15965 else
15966 {
15967 u64Val = pExitInfo->GCPtrEffAddr;
15968 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15969 }
15970 uint8_t const cbInstr = pExitInfo->cbInstr;
15971 uint64_t const u64FieldEnc = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
15972 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
15973 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15974 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, u64Val, u64FieldEnc, pExitInfo);
15975 Assert(!pVCpu->iem.s.cActiveMappings);
15976 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15977}
15978
15979
15980/**
15981 * Interface for HM and EM to emulate the VMPTRLD instruction.
15982 *
15983 * @returns Strict VBox status code.
15984 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15985 * @param pExitInfo Pointer to the VM-exit information.
15986 * @thread EMT(pVCpu)
15987 */
15988VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
15989{
15990 Assert(pExitInfo);
15991 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15992 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15993
15994 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15995
15996 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15997 uint8_t const cbInstr = pExitInfo->cbInstr;
15998 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15999 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16000 Assert(!pVCpu->iem.s.cActiveMappings);
16001 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16002}
16003
16004
16005/**
16006 * Interface for HM and EM to emulate the VMPTRST instruction.
16007 *
16008 * @returns Strict VBox status code.
16009 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16010 * @param pExitInfo Pointer to the VM-exit information.
16011 * @thread EMT(pVCpu)
16012 */
16013VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16014{
16015 Assert(pExitInfo);
16016 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16017 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16018
16019 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16020
16021 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16022 uint8_t const cbInstr = pExitInfo->cbInstr;
16023 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16024 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16025 Assert(!pVCpu->iem.s.cActiveMappings);
16026 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16027}
16028
16029
16030/**
16031 * Interface for HM and EM to emulate the VMCLEAR instruction.
16032 *
16033 * @returns Strict VBox status code.
16034 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16035 * @param pExitInfo Pointer to the VM-exit information.
16036 * @thread EMT(pVCpu)
16037 */
16038VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16039{
16040 Assert(pExitInfo);
16041 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16042 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16043
16044 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16045
16046 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16047 uint8_t const cbInstr = pExitInfo->cbInstr;
16048 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16049 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16050 Assert(!pVCpu->iem.s.cActiveMappings);
16051 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16052}
16053
16054
16055/**
16056 * Interface for HM and EM to emulate the VMLAUNCH/VMRESUME instruction.
16057 *
16058 * @returns Strict VBox status code.
16059 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16060 * @param cbInstr The instruction length in bytes.
16061 * @param uInstrId The instruction ID (VMXINSTRID_VMLAUNCH or
16062 * VMXINSTRID_VMRESUME).
16063 * @thread EMT(pVCpu)
16064 */
16065VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPUCC pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
16066{
16067 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16068 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);
16069
16070 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16071 VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId);
16072 Assert(!pVCpu->iem.s.cActiveMappings);
16073 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16074}
16075
16076
16077/**
16078 * Interface for HM and EM to emulate the VMXON instruction.
16079 *
16080 * @returns Strict VBox status code.
16081 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16082 * @param pExitInfo Pointer to the VM-exit information.
16083 * @thread EMT(pVCpu)
16084 */
16085VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16086{
16087 Assert(pExitInfo);
16088 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16089 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16090
16091 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16092
16093 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16094 uint8_t const cbInstr = pExitInfo->cbInstr;
16095 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
16096 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
16097 Assert(!pVCpu->iem.s.cActiveMappings);
16098 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16099}
16100
16101
16102/**
16103 * Interface for HM and EM to emulate the VMXOFF instruction.
16104 *
16105 * @returns Strict VBox status code.
16106 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16107 * @param cbInstr The instruction length in bytes.
16108 * @thread EMT(pVCpu)
16109 */
16110VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPUCC pVCpu, uint8_t cbInstr)
16111{
16112 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16113 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16114
16115 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16116 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
16117 Assert(!pVCpu->iem.s.cActiveMappings);
16118 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16119}
16120
16121
16122/**
16123 * Interface for HM and EM to emulate the INVVPID instruction.
16124 *
16125 * @returns Strict VBox status code.
16126 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16127 * @param pExitInfo Pointer to the VM-exit information.
16128 * @thread EMT(pVCpu)
16129 */
16130VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvvpid(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16131{
16132 Assert(pExitInfo);
16133 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 4);
16134 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16135
16136 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16137
16138 uint8_t const iEffSeg = pExitInfo->InstrInfo.Inv.iSegReg;
16139 uint8_t const cbInstr = pExitInfo->cbInstr;
16140 RTGCPTR const GCPtrInvvpidDesc = pExitInfo->GCPtrEffAddr;
16141 uint64_t const u64InvvpidType = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16142 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
16143 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
16144 VBOXSTRICTRC rcStrict = iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, u64InvvpidType, pExitInfo);
16145 Assert(!pVCpu->iem.s.cActiveMappings);
16146 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16147}
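/*
 * Sketch of filling in the INVVPID-specific instruction information before
 * calling the interface above.  Only the fields actually read by
 * IEMExecDecodedInvvpid() are shown; the surrounding handler and the local
 * variable names are assumed for illustration.
 *
 * @code
 *     VMXVEXITINFO ExitInfo;
 *     RT_ZERO(ExitInfo);
 *     ExitInfo.cbInstr               = cbExitInstr;
 *     ExitInfo.GCPtrEffAddr          = GCPtrInvvpidDesc;  // guest-linear address of the INVVPID descriptor
 *     ExitInfo.InstrInfo.Inv.iSegReg = iSegReg;           // segment register of the memory operand
 *     ExitInfo.InstrInfo.Inv.iReg2   = iGRegInvvpidType;  // GPR holding the INVVPID type (reg2 of the instr. info)
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
 * @endcode
 */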
16148
16149
16150/**
16151 * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
16152 *
16153 * @remarks The @a pvUser argument is currently unused.
16154 */
16155PGM_ALL_CB2_DECL(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVM pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
16156 void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
16157 PGMACCESSORIGIN enmOrigin, void *pvUser)
16158{
16159 RT_NOREF4(pVM, pvPhys, enmOrigin, pvUser);
16160
16161 RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
16162 if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
16163 {
16164 Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
16165 Assert(CPUMGetGuestVmxApicAccessPageAddr(pVCpu, IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);
16166
16167 /** @todo NSTVMX: How are we to distinguish instruction fetch accesses here?
16168 * Currently they will go through as read accesses. */
16169 uint32_t const fAccess = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
16170 uint16_t const offAccess = GCPhysFault & PAGE_OFFSET_MASK;
16171 VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
16172 if (RT_FAILURE(rcStrict))
16173 return rcStrict;
16174
16175 /* Any access on this APIC-access page has been handled, caller should not carry out the access. */
16176 return VINF_SUCCESS;
16177 }
16178
16179 Log(("iemVmxApicAccessPageHandler: Access outside VMX non-root mode, deregistering page at %#RGp\n", GCPhysAccessBase));
16180 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
16181 if (RT_FAILURE(rc))
16182 return rc;
16183
16184 /* Instruct the caller of this handler to perform the read/write as normal memory. */
16185 return VINF_PGM_HANDLER_DO_DEFAULT;
16186}
16187
16188#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
16189
16190#ifdef IN_RING3
16191
16192/**
16193 * Handles the unlikely and probably fatal merge cases.
16194 *
16195 * @returns Merged status code.
16196 * @param rcStrict Current EM status code.
16197 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16198 * with @a rcStrict.
16199 * @param iMemMap The memory mapping index. For error reporting only.
16200 * @param pVCpu The cross context virtual CPU structure of the calling
16201 * thread, for error reporting only.
16202 */
16203DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16204 unsigned iMemMap, PVMCPUCC pVCpu)
16205{
16206 if (RT_FAILURE_NP(rcStrict))
16207 return rcStrict;
16208
16209 if (RT_FAILURE_NP(rcStrictCommit))
16210 return rcStrictCommit;
16211
16212 if (rcStrict == rcStrictCommit)
16213 return rcStrictCommit;
16214
16215 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16216 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16217 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16218 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16219 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16220 return VERR_IOM_FF_STATUS_IPE;
16221}
16222
16223
16224/**
16225 * Helper for IEMR3ProcessForceFlag.
16226 *
16227 * @returns Merged status code.
16228 * @param rcStrict Current EM status code.
16229 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16230 * with @a rcStrict.
16231 * @param iMemMap The memory mapping index. For error reporting only.
16232 * @param pVCpu The cross context virtual CPU structure of the calling
16233 * thread, for error reporting only.
16234 */
16235DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
16236{
16237 /* Simple. */
16238 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16239 return rcStrictCommit;
16240
16241 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16242 return rcStrict;
16243
16244 /* EM scheduling status codes. */
16245 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16246 && rcStrict <= VINF_EM_LAST))
16247 {
16248 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16249 && rcStrictCommit <= VINF_EM_LAST))
16250 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16251 }
16252
16253 /* Unlikely */
16254 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16255}
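/*
 * Illustration of the merge rules implemented above, assuming the usual
 * convention that a numerically lower VINF_EM_* code is the more urgent
 * scheduling request (which is what the '<' comparison relies on):
 *
 * @code
 *     // rcStrict == VINF_SUCCESS or VINF_EM_RAW_TO_R3          =>  rcStrictCommit is returned as-is
 *     // rcStrictCommit == VINF_SUCCESS                         =>  rcStrict is kept
 *     // both are EM scheduling codes (VINF_EM_FIRST..EM_LAST)  =>  the numerically smaller one wins
 *     // anything else (e.g. a VERR_* status on either side)    =>  deferred to iemR3MergeStatusSlow()
 * @endcode
 */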
16256
16257
16258/**
16259 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16260 *
16261 * @returns Merge between @a rcStrict and what the commit operation returned.
16262 * @param pVM The cross context VM structure.
16263 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16264 * @param rcStrict The status code returned by ring-0 or raw-mode.
16265 */
16266VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
16267{
16268 /*
16269 * Reset the pending commit.
16270 */
16271 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16272 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16273 ("%#x %#x %#x\n",
16274 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16275 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16276
16277 /*
16278 * Commit the pending bounce buffers (usually just one).
16279 */
16280 unsigned cBufs = 0;
16281 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16282 while (iMemMap-- > 0)
16283 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16284 {
16285 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16286 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16287 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16288
16289 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16290 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16291 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16292
16293 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16294 {
16295 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16296 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16297 pbBuf,
16298 cbFirst,
16299 PGMACCESSORIGIN_IEM);
16300 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16301 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16302 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16303 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16304 }
16305
16306 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16307 {
16308 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16309 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16310 pbBuf + cbFirst,
16311 cbSecond,
16312 PGMACCESSORIGIN_IEM);
16313 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16314 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16315 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16316 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16317 }
16318 cBufs++;
16319 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16320 }
16321
16322 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16323 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16324 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16325 pVCpu->iem.s.cActiveMappings = 0;
16326 return rcStrict;
16327}
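/*
 * Sketch of the expected call site in the ring-3 force-flag processing code.
 * The surrounding EM logic is an assumption for illustration; the VMCPU_FF_IEM
 * flag and the function signature are the ones used above.
 *
 * @code
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 * @endcode
 */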
16328
16329#endif /* IN_RING3 */
16330