VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp@ 100575

最後變更 在這個檔案從100575是 100326,由 vboxsync 提交於 21 月 前

VMM/IEM: Fixed a problem with 'pop [rsp/esp]' that showed up in bs2-test-1 among other places. bugref:10369

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Author Date Id Revision
檔案大小: 363.7 KB
 
1/* $Id: IEMAllCImpl.cpp 100326 2023-06-28 23:48:08Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.alldomusa.eu.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_IEM
33#define VMCPU_INCL_CPUM_GST_CTX
34#define IEM_WITH_OPAQUE_DECODER_STATE
35#include <VBox/vmm/iem.h>
36#include <VBox/vmm/cpum.h>
37#include <VBox/vmm/apic.h>
38#include <VBox/vmm/pdm.h>
39#include <VBox/vmm/pgm.h>
40#include <VBox/vmm/iom.h>
41#include <VBox/vmm/em.h>
42#include <VBox/vmm/hm.h>
43#include <VBox/vmm/nem.h>
44#include <VBox/vmm/gim.h>
45#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
46# include <VBox/vmm/em.h>
47# include <VBox/vmm/hm_svm.h>
48#endif
49#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
50# include <VBox/vmm/hmvmxinline.h>
51#endif
52#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
53# include <VBox/vmm/cpuidcall.h>
54#endif
55#include <VBox/vmm/tm.h>
56#include <VBox/vmm/dbgf.h>
57#include <VBox/vmm/dbgftrace.h>
58#include "IEMInternal.h"
59#include <VBox/vmm/vmcc.h>
60#include <VBox/log.h>
61#include <VBox/err.h>
62#include <VBox/param.h>
63#include <VBox/dis.h>
64#include <iprt/asm-math.h>
65#include <iprt/assert.h>
66#include <iprt/string.h>
67#include <iprt/x86.h>
68
69#include "IEMInline.h"
70
71
72/*********************************************************************************************************************************
73* Defined Constants And Macros *
74*********************************************************************************************************************************/
75/**
76 * Flushes the prefetch buffer, light version.
77 * @todo The \#if conditions here must match the ones in iemOpcodeFlushLight().
78 */
79#ifndef IEM_WITH_CODE_TLB
80# define IEM_FLUSH_PREFETCH_LIGHT(a_pVCpu, a_cbInstr) iemOpcodeFlushLight(a_pVCpu, a_cbInstr)
81#else
82# define IEM_FLUSH_PREFETCH_LIGHT(a_pVCpu, a_cbInstr) do { } while (0)
83#endif
84
85/**
86 * Flushes the prefetch buffer, heavy version.
87 * @todo The \#if conditions here must match the ones in iemOpcodeFlushHeavy().
88 */
89#if !defined(IEM_WITH_CODE_TLB) || 1
90# define IEM_FLUSH_PREFETCH_HEAVY(a_pVCpu, a_cbInstr) iemOpcodeFlushHeavy(a_pVCpu, a_cbInstr)
91#else
92# define IEM_FLUSH_PREFETCH_HEAVY(a_pVCpu, a_cbInstr) do { } while (0)
93#endif
94
95
96
97/** @name Misc Helpers
98 * @{
99 */
100
101
102/**
103 * Worker function for iemHlpCheckPortIOPermission, don't call directly.
104 *
105 * @returns Strict VBox status code.
106 *
107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
108 * @param u16Port The port number.
109 * @param cbOperand The operand size.
110 */
111static VBOXSTRICTRC iemHlpCheckPortIOPermissionBitmap(PVMCPUCC pVCpu, uint16_t u16Port, uint8_t cbOperand)
112{
113 /* The TSS bits we're interested in are the same on 386 and AMD64. */
114 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_BUSY == X86_SEL_TYPE_SYS_386_TSS_BUSY);
115 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_AVAIL == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
116 AssertCompileMembersAtSameOffset(X86TSS32, offIoBitmap, X86TSS64, offIoBitmap);
117 AssertCompile(sizeof(X86TSS32) == sizeof(X86TSS64));
118
119 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
120
121 /*
122 * Check the TSS type, 16-bit TSSes doesn't have any I/O permission bitmap.
123 */
124 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType);
125 if (RT_UNLIKELY( pVCpu->cpum.GstCtx.tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_BUSY
126 && pVCpu->cpum.GstCtx.tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_AVAIL))
127 {
128 Log(("iemHlpCheckPortIOPermissionBitmap: Port=%#x cb=%d - TSS type %#x (attr=%#x) has no I/O bitmap -> #GP(0)\n",
129 u16Port, cbOperand, pVCpu->cpum.GstCtx.tr.Attr.n.u4Type, pVCpu->cpum.GstCtx.tr.Attr.u));
130 return iemRaiseGeneralProtectionFault0(pVCpu);
131 }
132
133 /*
134 * Read the bitmap offset (may #PF).
135 */
136 uint16_t offBitmap;
137 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &offBitmap, UINT8_MAX,
138 pVCpu->cpum.GstCtx.tr.u64Base + RT_UOFFSETOF(X86TSS64, offIoBitmap));
139 if (rcStrict != VINF_SUCCESS)
140 {
141 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading offIoBitmap (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
142 return rcStrict;
143 }
144
145 /*
146 * The bit range from u16Port to (u16Port + cbOperand - 1), however intel
147 * describes the CPU actually reading two bytes regardless of whether the
148 * bit range crosses a byte boundrary. Thus the + 1 in the test below.
149 */
150 uint32_t offFirstBit = (uint32_t)u16Port / 8 + offBitmap;
151 /** @todo check if real CPUs ensures that offBitmap has a minimum value of
152 * for instance sizeof(X86TSS32). */
153 if (offFirstBit + 1 > pVCpu->cpum.GstCtx.tr.u32Limit) /* the limit is inclusive */
154 {
155 Log(("iemHlpCheckPortIOPermissionBitmap: offFirstBit=%#x + 1 is beyond u32Limit=%#x -> #GP(0)\n",
156 offFirstBit, pVCpu->cpum.GstCtx.tr.u32Limit));
157 return iemRaiseGeneralProtectionFault0(pVCpu);
158 }
159
160 /*
161 * Read the necessary bits.
162 */
163 /** @todo Test the assertion in the intel manual that the CPU reads two
164 * bytes. The question is how this works wrt to \#PF and \#GP on the
165 * 2nd byte when it's not required. */
166 uint16_t bmBytes = UINT16_MAX;
167 rcStrict = iemMemFetchSysU16(pVCpu, &bmBytes, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + offFirstBit);
168 if (rcStrict != VINF_SUCCESS)
169 {
170 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading I/O bitmap @%#x (%Rrc)\n", offFirstBit, VBOXSTRICTRC_VAL(rcStrict)));
171 return rcStrict;
172 }
173
174 /*
175 * Perform the check.
176 */
177 uint16_t fPortMask = (1 << cbOperand) - 1;
178 bmBytes >>= (u16Port & 7);
179 if (bmBytes & fPortMask)
180 {
181 Log(("iemHlpCheckPortIOPermissionBitmap: u16Port=%#x LB %u - access denied (bm=%#x mask=%#x) -> #GP(0)\n",
182 u16Port, cbOperand, bmBytes, fPortMask));
183 return iemRaiseGeneralProtectionFault0(pVCpu);
184 }
185
186 return VINF_SUCCESS;
187}
188
189
190/**
191 * Checks if we are allowed to access the given I/O port, raising the
192 * appropriate exceptions if we aren't (or if the I/O bitmap is not
193 * accessible).
194 *
195 * @returns Strict VBox status code.
196 *
197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
198 * @param u16Port The port number.
199 * @param cbOperand The operand size.
200 */
201DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PVMCPUCC pVCpu, uint16_t u16Port, uint8_t cbOperand)
202{
203 X86EFLAGS Efl;
204 Efl.u = IEMMISC_GET_EFL(pVCpu);
205 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
206 && ( IEM_GET_CPL(pVCpu) > Efl.Bits.u2IOPL
207 || Efl.Bits.u1VM) )
208 return iemHlpCheckPortIOPermissionBitmap(pVCpu, u16Port, cbOperand);
209 return VINF_SUCCESS;
210}
211
212
213#if 0
214/**
215 * Calculates the parity bit.
216 *
217 * @returns true if the bit is set, false if not.
218 * @param u8Result The least significant byte of the result.
219 */
220static bool iemHlpCalcParityFlag(uint8_t u8Result)
221{
222 /*
223 * Parity is set if the number of bits in the least significant byte of
224 * the result is even.
225 */
226 uint8_t cBits;
227 cBits = u8Result & 1; /* 0 */
228 u8Result >>= 1;
229 cBits += u8Result & 1;
230 u8Result >>= 1;
231 cBits += u8Result & 1;
232 u8Result >>= 1;
233 cBits += u8Result & 1;
234 u8Result >>= 1;
235 cBits += u8Result & 1; /* 4 */
236 u8Result >>= 1;
237 cBits += u8Result & 1;
238 u8Result >>= 1;
239 cBits += u8Result & 1;
240 u8Result >>= 1;
241 cBits += u8Result & 1;
242 return !(cBits & 1);
243}
244#endif /* not used */
245
246
247/**
248 * Updates the specified flags according to a 8-bit result.
249 *
250 * @param pVCpu The cross context virtual CPU structure of the calling thread.
251 * @param u8Result The result to set the flags according to.
252 * @param fToUpdate The flags to update.
253 * @param fUndefined The flags that are specified as undefined.
254 */
255static void iemHlpUpdateArithEFlagsU8(PVMCPUCC pVCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
256{
257 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
258 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
259 pVCpu->cpum.GstCtx.eflags.u &= ~(fToUpdate | fUndefined);
260 pVCpu->cpum.GstCtx.eflags.u |= (fToUpdate | fUndefined) & fEFlags;
261}
262
263
264/**
265 * Updates the specified flags according to a 16-bit result.
266 *
267 * @param pVCpu The cross context virtual CPU structure of the calling thread.
268 * @param u16Result The result to set the flags according to.
269 * @param fToUpdate The flags to update.
270 * @param fUndefined The flags that are specified as undefined.
271 */
272static void iemHlpUpdateArithEFlagsU16(PVMCPUCC pVCpu, uint16_t u16Result, uint32_t fToUpdate, uint32_t fUndefined)
273{
274 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
275 iemAImpl_test_u16(&u16Result, u16Result, &fEFlags);
276 pVCpu->cpum.GstCtx.eflags.u &= ~(fToUpdate | fUndefined);
277 pVCpu->cpum.GstCtx.eflags.u |= (fToUpdate | fUndefined) & fEFlags;
278}
279
280
281/**
282 * Helper used by iret.
283 *
284 * @param pVCpu The cross context virtual CPU structure of the calling thread.
285 * @param uCpl The new CPL.
286 * @param pSReg Pointer to the segment register.
287 */
288static void iemHlpAdjustSelectorForNewCpl(PVMCPUCC pVCpu, uint8_t uCpl, PCPUMSELREG pSReg)
289{
290 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
291 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_MASK);
292
293 if ( uCpl > pSReg->Attr.n.u2Dpl
294 && pSReg->Attr.n.u1DescType /* code or data, not system */
295 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
296 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
297 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, 0);
298}
299
300
301/**
302 * Indicates that we have modified the FPU state.
303 *
304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
305 */
306DECLINLINE(void) iemHlpUsedFpu(PVMCPUCC pVCpu)
307{
308 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
309}
310
311/** @} */
312
313/** @name C Implementations
314 * @{
315 */
316
317
318/**
319 * Implements a pop [mem16].
320 */
321IEM_CIMPL_DEF_2(iemCImpl_pop_mem16, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst)
322{
323 uint16_t u16Value;
324 RTUINT64U TmpRsp;
325 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
326 VBOXSTRICTRC rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Value, &TmpRsp);
327 if (rcStrict == VINF_SUCCESS)
328 {
329LogAlways(("iemCImpl_pop_mem16: iEffSeg=%d GCPtrEffDst=%RGv u16Value=%#x\n", iEffSeg, GCPtrEffDst, u16Value));
330 rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, u16Value);
331 if (rcStrict == VINF_SUCCESS)
332 {
333 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
334 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
335 }
336 }
337 return rcStrict;
338
339}
340
341
342/**
343 * Implements a pop [mem32].
344 */
345IEM_CIMPL_DEF_2(iemCImpl_pop_mem32, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst)
346{
347 uint32_t u32Value;
348 RTUINT64U TmpRsp;
349 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
350 VBOXSTRICTRC rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Value, &TmpRsp);
351 if (rcStrict == VINF_SUCCESS)
352 {
353LogAlways(("iemCImpl_pop_mem32: iEffSeg=%d GCPtrEffDst=%RGv u32Value=%#x\n", iEffSeg, GCPtrEffDst, u32Value));
354 rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEffDst, u32Value);
355 if (rcStrict == VINF_SUCCESS)
356 {
357 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
358 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
359 }
360 }
361 return rcStrict;
362
363}
364
365
366/**
367 * Implements a pop [mem64].
368 */
369IEM_CIMPL_DEF_2(iemCImpl_pop_mem64, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst)
370{
371 uint64_t u64Value;
372 RTUINT64U TmpRsp;
373 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
374 VBOXSTRICTRC rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Value, &TmpRsp);
375 if (rcStrict == VINF_SUCCESS)
376 {
377 rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrEffDst, u64Value);
378 if (rcStrict == VINF_SUCCESS)
379 {
380 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
381 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
382 }
383 }
384 return rcStrict;
385
386}
387
388
389/**
390 * Implements a 16-bit popa.
391 */
392IEM_CIMPL_DEF_0(iemCImpl_popa_16)
393{
394 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu);
395 RTGCPTR GCPtrLast = GCPtrStart + 15;
396 VBOXSTRICTRC rcStrict;
397
398 /*
399 * The docs are a bit hard to comprehend here, but it looks like we wrap
400 * around in real mode as long as none of the individual "popa" crosses the
401 * end of the stack segment. In protected mode we check the whole access
402 * in one go. For efficiency, only do the word-by-word thing if we're in
403 * danger of wrapping around.
404 */
405 /** @todo do popa boundary / wrap-around checks. */
406 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu)
407 && (pVCpu->cpum.GstCtx.cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
408 {
409 /* word-by-word */
410 RTUINT64U TmpRsp;
411 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
412 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.di, &TmpRsp);
413 if (rcStrict == VINF_SUCCESS)
414 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.si, &TmpRsp);
415 if (rcStrict == VINF_SUCCESS)
416 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.bp, &TmpRsp);
417 if (rcStrict == VINF_SUCCESS)
418 {
419 iemRegAddToRspEx(pVCpu, &TmpRsp, 2); /* sp */
420 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.bx, &TmpRsp);
421 }
422 if (rcStrict == VINF_SUCCESS)
423 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.dx, &TmpRsp);
424 if (rcStrict == VINF_SUCCESS)
425 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.cx, &TmpRsp);
426 if (rcStrict == VINF_SUCCESS)
427 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.ax, &TmpRsp);
428 if (rcStrict == VINF_SUCCESS)
429 {
430 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
431 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
432 }
433 }
434 else
435 {
436 uint16_t const *pa16Mem = NULL;
437 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R, sizeof(*pa16Mem) - 1);
438 if (rcStrict == VINF_SUCCESS)
439 {
440 pVCpu->cpum.GstCtx.di = pa16Mem[7 - X86_GREG_xDI];
441 pVCpu->cpum.GstCtx.si = pa16Mem[7 - X86_GREG_xSI];
442 pVCpu->cpum.GstCtx.bp = pa16Mem[7 - X86_GREG_xBP];
443 /* skip sp */
444 pVCpu->cpum.GstCtx.bx = pa16Mem[7 - X86_GREG_xBX];
445 pVCpu->cpum.GstCtx.dx = pa16Mem[7 - X86_GREG_xDX];
446 pVCpu->cpum.GstCtx.cx = pa16Mem[7 - X86_GREG_xCX];
447 pVCpu->cpum.GstCtx.ax = pa16Mem[7 - X86_GREG_xAX];
448 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
449 if (rcStrict == VINF_SUCCESS)
450 {
451 iemRegAddToRsp(pVCpu, 16);
452 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
453 }
454 }
455 }
456 return rcStrict;
457}
458
459
460/**
461 * Implements a 32-bit popa.
462 */
463IEM_CIMPL_DEF_0(iemCImpl_popa_32)
464{
465 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu);
466 RTGCPTR GCPtrLast = GCPtrStart + 31;
467 VBOXSTRICTRC rcStrict;
468
469 /*
470 * The docs are a bit hard to comprehend here, but it looks like we wrap
471 * around in real mode as long as none of the individual "popa" crosses the
472 * end of the stack segment. In protected mode we check the whole access
473 * in one go. For efficiency, only do the word-by-word thing if we're in
474 * danger of wrapping around.
475 */
476 /** @todo do popa boundary / wrap-around checks. */
477 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu)
478 && (pVCpu->cpum.GstCtx.cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
479 {
480 /* word-by-word */
481 RTUINT64U TmpRsp;
482 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
483 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.edi, &TmpRsp);
484 if (rcStrict == VINF_SUCCESS)
485 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.esi, &TmpRsp);
486 if (rcStrict == VINF_SUCCESS)
487 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ebp, &TmpRsp);
488 if (rcStrict == VINF_SUCCESS)
489 {
490 iemRegAddToRspEx(pVCpu, &TmpRsp, 2); /* sp */
491 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ebx, &TmpRsp);
492 }
493 if (rcStrict == VINF_SUCCESS)
494 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.edx, &TmpRsp);
495 if (rcStrict == VINF_SUCCESS)
496 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ecx, &TmpRsp);
497 if (rcStrict == VINF_SUCCESS)
498 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.eax, &TmpRsp);
499 if (rcStrict == VINF_SUCCESS)
500 {
501#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
502 pVCpu->cpum.GstCtx.rdi &= UINT32_MAX;
503 pVCpu->cpum.GstCtx.rsi &= UINT32_MAX;
504 pVCpu->cpum.GstCtx.rbp &= UINT32_MAX;
505 pVCpu->cpum.GstCtx.rbx &= UINT32_MAX;
506 pVCpu->cpum.GstCtx.rdx &= UINT32_MAX;
507 pVCpu->cpum.GstCtx.rcx &= UINT32_MAX;
508 pVCpu->cpum.GstCtx.rax &= UINT32_MAX;
509#endif
510 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
511 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
512 }
513 }
514 else
515 {
516 uint32_t const *pa32Mem;
517 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R, sizeof(*pa32Mem) - 1);
518 if (rcStrict == VINF_SUCCESS)
519 {
520 pVCpu->cpum.GstCtx.rdi = pa32Mem[7 - X86_GREG_xDI];
521 pVCpu->cpum.GstCtx.rsi = pa32Mem[7 - X86_GREG_xSI];
522 pVCpu->cpum.GstCtx.rbp = pa32Mem[7 - X86_GREG_xBP];
523 /* skip esp */
524 pVCpu->cpum.GstCtx.rbx = pa32Mem[7 - X86_GREG_xBX];
525 pVCpu->cpum.GstCtx.rdx = pa32Mem[7 - X86_GREG_xDX];
526 pVCpu->cpum.GstCtx.rcx = pa32Mem[7 - X86_GREG_xCX];
527 pVCpu->cpum.GstCtx.rax = pa32Mem[7 - X86_GREG_xAX];
528 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
529 if (rcStrict == VINF_SUCCESS)
530 {
531 iemRegAddToRsp(pVCpu, 32);
532 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
533 }
534 }
535 }
536 return rcStrict;
537}
538
539
540/**
541 * Implements a 16-bit pusha.
542 */
543IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
544{
545 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu);
546 RTGCPTR GCPtrBottom = GCPtrTop - 15;
547 VBOXSTRICTRC rcStrict;
548
549 /*
550 * The docs are a bit hard to comprehend here, but it looks like we wrap
551 * around in real mode as long as none of the individual "pushd" crosses the
552 * end of the stack segment. In protected mode we check the whole access
553 * in one go. For efficiency, only do the word-by-word thing if we're in
554 * danger of wrapping around.
555 */
556 /** @todo do pusha boundary / wrap-around checks. */
557 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
558 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) )
559 {
560 /* word-by-word */
561 RTUINT64U TmpRsp;
562 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
563 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.ax, &TmpRsp);
564 if (rcStrict == VINF_SUCCESS)
565 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.cx, &TmpRsp);
566 if (rcStrict == VINF_SUCCESS)
567 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.dx, &TmpRsp);
568 if (rcStrict == VINF_SUCCESS)
569 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.bx, &TmpRsp);
570 if (rcStrict == VINF_SUCCESS)
571 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.sp, &TmpRsp);
572 if (rcStrict == VINF_SUCCESS)
573 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.bp, &TmpRsp);
574 if (rcStrict == VINF_SUCCESS)
575 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.si, &TmpRsp);
576 if (rcStrict == VINF_SUCCESS)
577 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.di, &TmpRsp);
578 if (rcStrict == VINF_SUCCESS)
579 {
580 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
581 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
582 }
583 }
584 else
585 {
586 GCPtrBottom--;
587 uint16_t *pa16Mem = NULL;
588 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W, sizeof(*pa16Mem) - 1);
589 if (rcStrict == VINF_SUCCESS)
590 {
591 pa16Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.di;
592 pa16Mem[7 - X86_GREG_xSI] = pVCpu->cpum.GstCtx.si;
593 pa16Mem[7 - X86_GREG_xBP] = pVCpu->cpum.GstCtx.bp;
594 pa16Mem[7 - X86_GREG_xSP] = pVCpu->cpum.GstCtx.sp;
595 pa16Mem[7 - X86_GREG_xBX] = pVCpu->cpum.GstCtx.bx;
596 pa16Mem[7 - X86_GREG_xDX] = pVCpu->cpum.GstCtx.dx;
597 pa16Mem[7 - X86_GREG_xCX] = pVCpu->cpum.GstCtx.cx;
598 pa16Mem[7 - X86_GREG_xAX] = pVCpu->cpum.GstCtx.ax;
599 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
600 if (rcStrict == VINF_SUCCESS)
601 {
602 iemRegSubFromRsp(pVCpu, 16);
603 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
604 }
605 }
606 }
607 return rcStrict;
608}
609
610
611/**
612 * Implements a 32-bit pusha.
613 */
614IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
615{
616 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu);
617 RTGCPTR GCPtrBottom = GCPtrTop - 31;
618 VBOXSTRICTRC rcStrict;
619
620 /*
621 * The docs are a bit hard to comprehend here, but it looks like we wrap
622 * around in real mode as long as none of the individual "pusha" crosses the
623 * end of the stack segment. In protected mode we check the whole access
624 * in one go. For efficiency, only do the word-by-word thing if we're in
625 * danger of wrapping around.
626 */
627 /** @todo do pusha boundary / wrap-around checks. */
628 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
629 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) )
630 {
631 /* word-by-word */
632 RTUINT64U TmpRsp;
633 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
634 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.eax, &TmpRsp);
635 if (rcStrict == VINF_SUCCESS)
636 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.ecx, &TmpRsp);
637 if (rcStrict == VINF_SUCCESS)
638 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.edx, &TmpRsp);
639 if (rcStrict == VINF_SUCCESS)
640 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.ebx, &TmpRsp);
641 if (rcStrict == VINF_SUCCESS)
642 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.esp, &TmpRsp);
643 if (rcStrict == VINF_SUCCESS)
644 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.ebp, &TmpRsp);
645 if (rcStrict == VINF_SUCCESS)
646 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.esi, &TmpRsp);
647 if (rcStrict == VINF_SUCCESS)
648 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.edi, &TmpRsp);
649 if (rcStrict == VINF_SUCCESS)
650 {
651 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
652 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
653 }
654 }
655 else
656 {
657 GCPtrBottom--;
658 uint32_t *pa32Mem;
659 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W, sizeof(*pa32Mem) - 1);
660 if (rcStrict == VINF_SUCCESS)
661 {
662 pa32Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.edi;
663 pa32Mem[7 - X86_GREG_xSI] = pVCpu->cpum.GstCtx.esi;
664 pa32Mem[7 - X86_GREG_xBP] = pVCpu->cpum.GstCtx.ebp;
665 pa32Mem[7 - X86_GREG_xSP] = pVCpu->cpum.GstCtx.esp;
666 pa32Mem[7 - X86_GREG_xBX] = pVCpu->cpum.GstCtx.ebx;
667 pa32Mem[7 - X86_GREG_xDX] = pVCpu->cpum.GstCtx.edx;
668 pa32Mem[7 - X86_GREG_xCX] = pVCpu->cpum.GstCtx.ecx;
669 pa32Mem[7 - X86_GREG_xAX] = pVCpu->cpum.GstCtx.eax;
670 rcStrict = iemMemCommitAndUnmap(pVCpu, pa32Mem, IEM_ACCESS_STACK_W);
671 if (rcStrict == VINF_SUCCESS)
672 {
673 iemRegSubFromRsp(pVCpu, 32);
674 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
675 }
676 }
677 }
678 return rcStrict;
679}
680
681
682/**
683 * Implements pushf.
684 *
685 *
686 * @param enmEffOpSize The effective operand size.
687 */
688IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
689{
690 VBOXSTRICTRC rcStrict;
691
692 if (!IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_PUSHF))
693 { /* probable */ }
694 else
695 {
696 Log2(("pushf: Guest intercept -> #VMEXIT\n"));
697 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
698 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_PUSHF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
699 }
700
701 /*
702 * If we're in V8086 mode some care is required (which is why we're in
703 * doing this in a C implementation).
704 */
705 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
706 if ( (fEfl & X86_EFL_VM)
707 && X86_EFL_GET_IOPL(fEfl) != 3 )
708 {
709 Assert(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE);
710 if ( enmEffOpSize != IEMMODE_16BIT
711 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME))
712 return iemRaiseGeneralProtectionFault0(pVCpu);
713 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
714 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
715 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl);
716 }
717 else
718 {
719
720 /*
721 * Ok, clear RF and VM, adjust for ancient CPUs, and push the flags.
722 */
723 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
724
725 switch (enmEffOpSize)
726 {
727 case IEMMODE_16BIT:
728 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
729 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_186)
730 fEfl |= UINT16_C(0xf000);
731 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl);
732 break;
733 case IEMMODE_32BIT:
734 rcStrict = iemMemStackPushU32(pVCpu, fEfl);
735 break;
736 case IEMMODE_64BIT:
737 rcStrict = iemMemStackPushU64(pVCpu, fEfl);
738 break;
739 IEM_NOT_REACHED_DEFAULT_CASE_RET();
740 }
741 }
742
743 if (rcStrict == VINF_SUCCESS)
744 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
745 return rcStrict;
746}
747
748
749/**
750 * Implements popf.
751 *
752 * @param enmEffOpSize The effective operand size.
753 */
754IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
755{
756 uint32_t const fEflOld = IEMMISC_GET_EFL(pVCpu);
757 VBOXSTRICTRC rcStrict;
758 uint32_t fEflNew;
759
760 if (!IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_POPF))
761 { /* probable */ }
762 else
763 {
764 Log2(("popf: Guest intercept -> #VMEXIT\n"));
765 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
766 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_POPF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
767 }
768
769 /*
770 * V8086 is special as usual.
771 */
772 if (fEflOld & X86_EFL_VM)
773 {
774 /*
775 * Almost anything goes if IOPL is 3.
776 */
777 if (X86_EFL_GET_IOPL(fEflOld) == 3)
778 {
779 switch (enmEffOpSize)
780 {
781 case IEMMODE_16BIT:
782 {
783 uint16_t u16Value;
784 rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
785 if (rcStrict != VINF_SUCCESS)
786 return rcStrict;
787 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
788 break;
789 }
790 case IEMMODE_32BIT:
791 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew);
792 if (rcStrict != VINF_SUCCESS)
793 return rcStrict;
794 break;
795 IEM_NOT_REACHED_DEFAULT_CASE_RET();
796 }
797
798 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
799 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
800 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
801 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
802 }
803 /*
804 * Interrupt flag virtualization with CR4.VME=1.
805 */
806 else if ( enmEffOpSize == IEMMODE_16BIT
807 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME) )
808 {
809 uint16_t u16Value;
810 RTUINT64U TmpRsp;
811 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
812 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Value, &TmpRsp);
813 if (rcStrict != VINF_SUCCESS)
814 return rcStrict;
815
816 /** @todo Is the popf VME \#GP(0) delivered after updating RSP+RIP
817 * or before? */
818 if ( ( (u16Value & X86_EFL_IF)
819 && (fEflOld & X86_EFL_VIP))
820 || (u16Value & X86_EFL_TF) )
821 return iemRaiseGeneralProtectionFault0(pVCpu);
822
823 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
824 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
825 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
826 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
827
828 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
829 }
830 else
831 return iemRaiseGeneralProtectionFault0(pVCpu);
832
833 }
834 /*
835 * Not in V8086 mode.
836 */
837 else
838 {
839 /* Pop the flags. */
840 switch (enmEffOpSize)
841 {
842 case IEMMODE_16BIT:
843 {
844 uint16_t u16Value;
845 rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
846 if (rcStrict != VINF_SUCCESS)
847 return rcStrict;
848 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
849
850 /*
851 * Ancient CPU adjustments:
852 * - 8086, 80186, V20/30:
853 * Fixed bits 15:12 bits are not kept correctly internally, mostly for
854 * practical reasons (masking below). We add them when pushing flags.
855 * - 80286:
856 * The NT and IOPL flags cannot be popped from real mode and are
857 * therefore always zero (since a 286 can never exit from PM and
858 * their initial value is zero). This changed on a 386 and can
859 * therefore be used to detect 286 or 386 CPU in real mode.
860 */
861 if ( IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286
862 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
863 fEflNew &= ~(X86_EFL_NT | X86_EFL_IOPL);
864 break;
865 }
866 case IEMMODE_32BIT:
867 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew);
868 if (rcStrict != VINF_SUCCESS)
869 return rcStrict;
870 break;
871 case IEMMODE_64BIT:
872 {
873 uint64_t u64Value;
874 rcStrict = iemMemStackPopU64(pVCpu, &u64Value);
875 if (rcStrict != VINF_SUCCESS)
876 return rcStrict;
877 fEflNew = u64Value; /** @todo testcase: Check exactly what happens if high bits are set. */
878 break;
879 }
880 IEM_NOT_REACHED_DEFAULT_CASE_RET();
881 }
882
883 /* Merge them with the current flags. */
884 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
885 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
886 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
887 || IEM_GET_CPL(pVCpu) == 0)
888 {
889 fEflNew &= fPopfBits;
890 fEflNew |= ~fPopfBits & fEflOld;
891 }
892 else if (IEM_GET_CPL(pVCpu) <= X86_EFL_GET_IOPL(fEflOld))
893 {
894 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
895 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
896 }
897 else
898 {
899 fEflNew &= fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF);
900 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
901 }
902 }
903
904 /*
905 * Commit the flags.
906 */
907 Assert(fEflNew & RT_BIT_32(1));
908 IEMMISC_SET_EFL(pVCpu, fEflNew);
909 return iemRegAddToRipAndFinishingClearingRfEx(pVCpu, cbInstr, fEflOld);
910}
911
912
913/**
914 * Implements an indirect call.
915 *
916 * @param uNewPC The new program counter (RIP) value (loaded from the
917 * operand).
918 */
919IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
920{
921 uint16_t const uOldPC = pVCpu->cpum.GstCtx.ip + cbInstr;
922 if (uNewPC <= pVCpu->cpum.GstCtx.cs.u32Limit)
923 {
924 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldPC);
925 if (rcStrict == VINF_SUCCESS)
926 {
927 pVCpu->cpum.GstCtx.rip = uNewPC;
928 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);
929 return iemRegFinishClearingRF(pVCpu);
930 }
931 return rcStrict;
932 }
933 return iemRaiseGeneralProtectionFault0(pVCpu);
934}
935
936
937/**
938 * Implements a 16-bit relative call.
939 *
940 * @param offDisp The displacment offset.
941 */
942IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
943{
944 uint16_t const uOldPC = pVCpu->cpum.GstCtx.ip + cbInstr;
945 uint16_t const uNewPC = uOldPC + offDisp;
946 if (uNewPC <= pVCpu->cpum.GstCtx.cs.u32Limit)
947 {
948 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldPC);
949 if (rcStrict == VINF_SUCCESS)
950 {
951 pVCpu->cpum.GstCtx.rip = uNewPC;
952 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);
953 return iemRegFinishClearingRF(pVCpu);
954 }
955 return rcStrict;
956 }
957 return iemRaiseGeneralProtectionFault0(pVCpu);
958}
959
960
961/**
962 * Implements a 32-bit indirect call.
963 *
964 * @param uNewPC The new program counter (RIP) value (loaded from the
965 * operand).
966 */
967IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
968{
969 uint32_t const uOldPC = pVCpu->cpum.GstCtx.eip + cbInstr;
970 if (uNewPC <= pVCpu->cpum.GstCtx.cs.u32Limit)
971 {
972 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldPC);
973 if (rcStrict == VINF_SUCCESS)
974 {
975 pVCpu->cpum.GstCtx.rip = uNewPC;
976 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);
977 return iemRegFinishClearingRF(pVCpu);
978 }
979 return rcStrict;
980 }
981 return iemRaiseGeneralProtectionFault0(pVCpu);
982}
983
984
985/**
986 * Implements a 32-bit relative call.
987 *
988 * @param offDisp The displacment offset.
989 */
990IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
991{
992 uint32_t const uOldPC = pVCpu->cpum.GstCtx.eip + cbInstr;
993 uint32_t const uNewPC = uOldPC + offDisp;
994 if (uNewPC <= pVCpu->cpum.GstCtx.cs.u32Limit)
995 {
996 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldPC);
997 if (rcStrict == VINF_SUCCESS)
998 {
999 pVCpu->cpum.GstCtx.rip = uNewPC;
1000 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);
1001 return iemRegFinishClearingRF(pVCpu);
1002 }
1003 return rcStrict;
1004 }
1005 return iemRaiseGeneralProtectionFault0(pVCpu);
1006}
1007
1008
1009/**
1010 * Implements a 64-bit indirect call.
1011 *
1012 * @param uNewPC The new program counter (RIP) value (loaded from the
1013 * operand).
1014 */
1015IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
1016{
1017 uint64_t const uOldPC = pVCpu->cpum.GstCtx.rip + cbInstr;
1018 if (IEM_IS_CANONICAL(uNewPC))
1019 {
1020 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldPC);
1021 if (rcStrict == VINF_SUCCESS)
1022 {
1023 pVCpu->cpum.GstCtx.rip = uNewPC;
1024 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);
1025 return iemRegFinishClearingRF(pVCpu);
1026 }
1027 return rcStrict;
1028 }
1029 return iemRaiseGeneralProtectionFault0(pVCpu);
1030}
1031
1032
1033/**
1034 * Implements a 64-bit relative call.
1035 *
1036 * @param offDisp The displacment offset.
1037 */
1038IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
1039{
1040 uint64_t const uOldPC = pVCpu->cpum.GstCtx.rip + cbInstr;
1041 uint64_t const uNewPC = uOldPC + offDisp;
1042 if (IEM_IS_CANONICAL(uNewPC))
1043 {
1044 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldPC);
1045 if (rcStrict == VINF_SUCCESS)
1046 {
1047 pVCpu->cpum.GstCtx.rip = uNewPC;
1048 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);
1049 return iemRegFinishClearingRF(pVCpu);
1050 }
1051 return rcStrict;
1052 }
1053 return iemRaiseNotCanonical(pVCpu);
1054}
1055
1056
1057/**
1058 * Implements far jumps and calls thru task segments (TSS).
1059 *
1060 * @returns VBox strict status code.
1061 * @param pVCpu The cross context virtual CPU structure of the
1062 * calling thread.
1063 * @param cbInstr The current instruction length.
1064 * @param uSel The selector.
1065 * @param enmBranch The kind of branching we're performing.
1066 * @param enmEffOpSize The effective operand size.
1067 * @param pDesc The descriptor corresponding to @a uSel. The type is
1068 * task gate.
1069 */
1070static VBOXSTRICTRC iemCImpl_BranchTaskSegment(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uSel, IEMBRANCH enmBranch,
1071 IEMMODE enmEffOpSize, PIEMSELDESC pDesc)
1072{
1073#ifndef IEM_IMPLEMENTS_TASKSWITCH
1074 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1075#else
1076 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1077 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
1078 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
1079 RT_NOREF_PV(enmEffOpSize);
1080 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1081
1082 if ( pDesc->Legacy.Gate.u2Dpl < IEM_GET_CPL(pVCpu)
1083 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1084 {
1085 Log(("BranchTaskSegment invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1086 IEM_GET_CPL(pVCpu), (uSel & X86_SEL_RPL)));
1087 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1088 }
1089
1090 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
1091 * far calls (see iemCImpl_callf). Most likely in both cases it should be
1092 * checked here, need testcases. */
1093 if (!pDesc->Legacy.Gen.u1Present)
1094 {
1095 Log(("BranchTaskSegment TSS not present uSel=%04x -> #NP\n", uSel));
1096 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1097 }
1098
1099 uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
1100 return iemTaskSwitch(pVCpu, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1101 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSel, pDesc);
1102#endif
1103}
1104
1105
1106/**
1107 * Implements far jumps and calls thru task gates.
1108 *
1109 * @returns VBox strict status code.
1110 * @param pVCpu The cross context virtual CPU structure of the
1111 * calling thread.
1112 * @param cbInstr The current instruction length.
1113 * @param uSel The selector.
1114 * @param enmBranch The kind of branching we're performing.
1115 * @param enmEffOpSize The effective operand size.
1116 * @param pDesc The descriptor corresponding to @a uSel. The type is
1117 * task gate.
1118 */
1119static VBOXSTRICTRC iemCImpl_BranchTaskGate(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uSel, IEMBRANCH enmBranch,
1120 IEMMODE enmEffOpSize, PIEMSELDESC pDesc)
1121{
1122#ifndef IEM_IMPLEMENTS_TASKSWITCH
1123 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1124#else
1125 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1126 RT_NOREF_PV(enmEffOpSize);
1127 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1128
1129 if ( pDesc->Legacy.Gate.u2Dpl < IEM_GET_CPL(pVCpu)
1130 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1131 {
1132 Log(("BranchTaskGate invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1133 IEM_GET_CPL(pVCpu), (uSel & X86_SEL_RPL)));
1134 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1135 }
1136
1137 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
1138 * far calls (see iemCImpl_callf). Most likely in both cases it should be
1139 * checked here, need testcases. */
1140 if (!pDesc->Legacy.Gen.u1Present)
1141 {
1142 Log(("BranchTaskSegment segment not present uSel=%04x -> #NP\n", uSel));
1143 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1144 }
1145
1146 /*
1147 * Fetch the new TSS descriptor from the GDT.
1148 */
1149 RTSEL uSelTss = pDesc->Legacy.Gate.u16Sel;
1150 if (uSelTss & X86_SEL_LDT)
1151 {
1152 Log(("BranchTaskGate TSS is in LDT. uSel=%04x uSelTss=%04x -> #GP\n", uSel, uSelTss));
1153 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1154 }
1155
1156 IEMSELDESC TssDesc;
1157 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelTss, X86_XCPT_GP);
1158 if (rcStrict != VINF_SUCCESS)
1159 return rcStrict;
1160
1161 if (TssDesc.Legacy.Gate.u4Type & X86_SEL_TYPE_SYS_TSS_BUSY_MASK)
1162 {
1163 Log(("BranchTaskGate TSS is busy. uSel=%04x uSelTss=%04x DescType=%#x -> #GP\n", uSel, uSelTss,
1164 TssDesc.Legacy.Gate.u4Type));
1165 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1166 }
1167
1168 if (!TssDesc.Legacy.Gate.u1Present)
1169 {
1170 Log(("BranchTaskGate TSS is not present. uSel=%04x uSelTss=%04x -> #NP\n", uSel, uSelTss));
1171 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelTss & X86_SEL_MASK_OFF_RPL);
1172 }
1173
1174 uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
1175 return iemTaskSwitch(pVCpu, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1176 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSelTss, &TssDesc);
1177#endif
1178}
1179
1180
1181/**
1182 * Implements far jumps and calls thru call gates.
1183 *
1184 * @returns VBox strict status code.
1185 * @param pVCpu The cross context virtual CPU structure of the
1186 * calling thread.
1187 * @param cbInstr The current instruction length.
1188 * @param uSel The selector.
1189 * @param enmBranch The kind of branching we're performing.
1190 * @param enmEffOpSize The effective operand size.
1191 * @param pDesc The descriptor corresponding to @a uSel. The type is
1192 * call gate.
1193 */
1194static VBOXSTRICTRC iemCImpl_BranchCallGate(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uSel, IEMBRANCH enmBranch,
1195 IEMMODE enmEffOpSize, PIEMSELDESC pDesc)
1196{
1197#define IEM_IMPLEMENTS_CALLGATE
1198#ifndef IEM_IMPLEMENTS_CALLGATE
1199 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1200#else
1201 RT_NOREF_PV(enmEffOpSize);
1202 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1203
1204 /* NB: Far jumps can only do intra-privilege transfers. Far calls support
1205 * inter-privilege calls and are much more complex.
1206 *
1207 * NB: 64-bit call gate has the same type as a 32-bit call gate! If
1208 * EFER.LMA=1, the gate must be 64-bit. Conversely if EFER.LMA=0, the gate
1209 * must be 16-bit or 32-bit.
1210 */
1211 /** @todo effective operand size is probably irrelevant here, only the
1212 * call gate bitness matters??
1213 */
1214 VBOXSTRICTRC rcStrict;
1215 RTPTRUNION uPtrRet;
1216 uint64_t uNewRsp;
1217 uint64_t uNewRip;
1218 uint64_t u64Base;
1219 uint32_t cbLimit;
1220 RTSEL uNewCS;
1221 IEMSELDESC DescCS;
1222
1223 AssertCompile(X86_SEL_TYPE_SYS_386_CALL_GATE == AMD64_SEL_TYPE_SYS_CALL_GATE);
1224 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1225 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE
1226 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE);
1227
1228 /* Determine the new instruction pointer from the gate descriptor. */
1229 uNewRip = pDesc->Legacy.Gate.u16OffsetLow
1230 | ((uint32_t)pDesc->Legacy.Gate.u16OffsetHigh << 16)
1231 | ((uint64_t)pDesc->Long.Gate.u32OffsetTop << 32);
1232
1233 /* Perform DPL checks on the gate descriptor. */
1234 if ( pDesc->Legacy.Gate.u2Dpl < IEM_GET_CPL(pVCpu)
1235 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1236 {
1237 Log(("BranchCallGate invalid priv. uSel=%04x Gate DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1238 IEM_GET_CPL(pVCpu), (uSel & X86_SEL_RPL)));
1239 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1240 }
1241
1242 /** @todo does this catch NULL selectors, too? */
1243 if (!pDesc->Legacy.Gen.u1Present)
1244 {
1245 Log(("BranchCallGate Gate not present uSel=%04x -> #NP\n", uSel));
1246 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
1247 }
1248
1249 /*
1250 * Fetch the target CS descriptor from the GDT or LDT.
1251 */
1252 uNewCS = pDesc->Legacy.Gate.u16Sel;
1253 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_GP);
1254 if (rcStrict != VINF_SUCCESS)
1255 return rcStrict;
1256
1257 /* Target CS must be a code selector. */
1258 if ( !DescCS.Legacy.Gen.u1DescType
1259 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1260 {
1261 Log(("BranchCallGate %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1262 uNewCS, uNewRip, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
1263 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1264 }
1265
1266 /* Privilege checks on target CS. */
1267 if (enmBranch == IEMBRANCH_JUMP)
1268 {
1269 if (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1270 {
1271 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
1272 {
1273 Log(("BranchCallGate jump (conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1274 uNewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
1275 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1276 }
1277 }
1278 else
1279 {
1280 if (DescCS.Legacy.Gen.u2Dpl != IEM_GET_CPL(pVCpu))
1281 {
1282 Log(("BranchCallGate jump (non-conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1283 uNewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
1284 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1285 }
1286 }
1287 }
1288 else
1289 {
1290 Assert(enmBranch == IEMBRANCH_CALL);
1291 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
1292 {
1293 Log(("BranchCallGate call invalid priv. uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1294 uNewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
1295 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1296 }
1297 }
1298
1299 /* Additional long mode checks. */
1300 if (IEM_IS_LONG_MODE(pVCpu))
1301 {
1302 if (!DescCS.Legacy.Gen.u1Long)
1303 {
1304 Log(("BranchCallGate uNewCS %04x -> not a 64-bit code segment.\n", uNewCS));
1305 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1306 }
1307
1308 /* L vs D. */
1309 if ( DescCS.Legacy.Gen.u1Long
1310 && DescCS.Legacy.Gen.u1DefBig)
1311 {
1312 Log(("BranchCallGate uNewCS %04x -> both L and D are set.\n", uNewCS));
1313 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1314 }
1315 }
1316
1317 if (!DescCS.Legacy.Gate.u1Present)
1318 {
1319 Log(("BranchCallGate target CS is not present. uSel=%04x uNewCS=%04x -> #NP(CS)\n", uSel, uNewCS));
1320 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCS);
1321 }
1322
1323 if (enmBranch == IEMBRANCH_JUMP)
1324 {
1325 /** @todo This is very similar to regular far jumps; merge! */
1326 /* Jumps are fairly simple... */
1327
1328 /* Chop the high bits off if 16-bit gate (Intel says so). */
1329 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1330 uNewRip = (uint16_t)uNewRip;
1331
1332 /* Limit check for non-long segments. */
1333 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1334 if (DescCS.Legacy.Gen.u1Long)
1335 u64Base = 0;
1336 else
1337 {
1338 if (uNewRip > cbLimit)
1339 {
1340 Log(("BranchCallGate jump %04x:%08RX64 -> out of bounds (%#x) -> #GP(0)\n", uNewCS, uNewRip, cbLimit));
1341 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1342 }
1343 u64Base = X86DESC_BASE(&DescCS.Legacy);
1344 }
1345
1346 /* Canonical address check. */
1347 if (!IEM_IS_CANONICAL(uNewRip))
1348 {
1349 Log(("BranchCallGate jump %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1350 return iemRaiseNotCanonical(pVCpu);
1351 }
1352
1353 /*
1354 * Ok, everything checked out fine. Now set the accessed bit before
1355 * committing the result into CS, CSHID and RIP.
1356 */
1357 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1358 {
1359 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1360 if (rcStrict != VINF_SUCCESS)
1361 return rcStrict;
1362 /** @todo check what VT-x and AMD-V does. */
1363 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1364 }
1365
1366 /* commit */
1367 pVCpu->cpum.GstCtx.rip = uNewRip;
1368 pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1369 pVCpu->cpum.GstCtx.cs.Sel |= IEM_GET_CPL(pVCpu); /** @todo is this right for conforming segs? or in general? */
1370 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1371 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1372 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1373 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1374 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1375 }
1376 else
1377 {
1378 Assert(enmBranch == IEMBRANCH_CALL);
1379 /* Calls are much more complicated. */
1380
1381 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF) && (DescCS.Legacy.Gen.u2Dpl < IEM_GET_CPL(pVCpu)))
1382 {
1383 uint16_t offNewStack; /* Offset of new stack in TSS. */
1384 uint16_t cbNewStack; /* Number of bytes the stack information takes up in TSS. */
1385 uint8_t uNewCSDpl;
1386 uint8_t cbWords;
1387 RTSEL uNewSS;
1388 RTSEL uOldSS;
1389 uint64_t uOldRsp;
1390 IEMSELDESC DescSS;
1391 RTPTRUNION uPtrTSS;
1392 RTGCPTR GCPtrTSS;
1393 RTPTRUNION uPtrParmWds;
1394 RTGCPTR GCPtrParmWds;
1395
1396 /* More privilege. This is the fun part. */
1397 Assert(!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)); /* Filtered out above. */
1398
1399 /*
1400 * Determine new SS:rSP from the TSS.
1401 */
1402 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType);
1403
1404 /* Figure out where the new stack pointer is stored in the TSS. */
1405 uNewCSDpl = DescCS.Legacy.Gen.u2Dpl;
1406 if (!IEM_IS_LONG_MODE(pVCpu))
1407 {
1408 if (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1409 {
1410 offNewStack = RT_UOFFSETOF(X86TSS32, esp0) + uNewCSDpl * 8;
1411 cbNewStack = RT_SIZEOFMEMB(X86TSS32, esp0) + RT_SIZEOFMEMB(X86TSS32, ss0);
1412 }
1413 else
1414 {
1415 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1416 offNewStack = RT_UOFFSETOF(X86TSS16, sp0) + uNewCSDpl * 4;
1417 cbNewStack = RT_SIZEOFMEMB(X86TSS16, sp0) + RT_SIZEOFMEMB(X86TSS16, ss0);
1418 }
1419 }
1420 else
1421 {
1422 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1423 offNewStack = RT_UOFFSETOF(X86TSS64, rsp0) + uNewCSDpl * RT_SIZEOFMEMB(X86TSS64, rsp0);
1424 cbNewStack = RT_SIZEOFMEMB(X86TSS64, rsp0);
1425 }
1426
1427 /* Check against TSS limit. */
1428 if ((uint16_t)(offNewStack + cbNewStack - 1) > pVCpu->cpum.GstCtx.tr.u32Limit)
1429 {
1430 Log(("BranchCallGate inner stack past TSS limit - %u > %u -> #TS(TSS)\n", offNewStack + cbNewStack - 1, pVCpu->cpum.GstCtx.tr.u32Limit));
1431 return iemRaiseTaskSwitchFaultBySelector(pVCpu, pVCpu->cpum.GstCtx.tr.Sel);
1432 }
1433
1434 GCPtrTSS = pVCpu->cpum.GstCtx.tr.u64Base + offNewStack;
1435 rcStrict = iemMemMap(pVCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R, 0);
1436 if (rcStrict != VINF_SUCCESS)
1437 {
1438 Log(("BranchCallGate: TSS mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1439 return rcStrict;
1440 }
1441
1442 if (!IEM_IS_LONG_MODE(pVCpu))
1443 {
1444 if (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1445 {
1446 uNewRsp = uPtrTSS.pu32[0];
1447 uNewSS = uPtrTSS.pu16[2];
1448 }
1449 else
1450 {
1451 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1452 uNewRsp = uPtrTSS.pu16[0];
1453 uNewSS = uPtrTSS.pu16[1];
1454 }
1455 }
1456 else
1457 {
1458 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1459 /* SS will be a NULL selector, but that's valid. */
1460 uNewRsp = uPtrTSS.pu64[0];
1461 uNewSS = uNewCSDpl;
1462 }
1463
1464 /* Done with the TSS now. */
1465 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrTSS.pv, IEM_ACCESS_SYS_R);
1466 if (rcStrict != VINF_SUCCESS)
1467 {
1468 Log(("BranchCallGate: TSS unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1469 return rcStrict;
1470 }
1471
1472 /* Only used outside of long mode. */
1473 cbWords = pDesc->Legacy.Gate.u5ParmCount;
1474
1475 /* If EFER.LMA is 0, there's extra work to do. */
1476 if (!IEM_IS_LONG_MODE(pVCpu))
1477 {
1478 if ((uNewSS & X86_SEL_MASK_OFF_RPL) == 0)
1479 {
1480 Log(("BranchCallGate new SS NULL -> #TS(NewSS)\n"));
1481 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1482 }
1483
1484 /* Grab the new SS descriptor. */
1485 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS);
1486 if (rcStrict != VINF_SUCCESS)
1487 return rcStrict;
1488
1489 /* Ensure that CS.DPL == SS.RPL == SS.DPL. */
1490 if ( (DescCS.Legacy.Gen.u2Dpl != (uNewSS & X86_SEL_RPL))
1491 || (DescCS.Legacy.Gen.u2Dpl != DescSS.Legacy.Gen.u2Dpl))
1492 {
1493 Log(("BranchCallGate call bad RPL/DPL uNewSS=%04x SS DPL=%d CS DPL=%u -> #TS(NewSS)\n",
1494 uNewSS, DescCS.Legacy.Gen.u2Dpl, DescCS.Legacy.Gen.u2Dpl));
1495 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1496 }
1497
1498 /* Ensure new SS is a writable data segment. */
1499 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
1500 {
1501 Log(("BranchCallGate call new SS -> not a writable data selector (u4Type=%#x)\n", DescSS.Legacy.Gen.u4Type));
1502 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1503 }
1504
1505 if (!DescSS.Legacy.Gen.u1Present)
1506 {
1507 Log(("BranchCallGate New stack not present uSel=%04x -> #SS(NewSS)\n", uNewSS));
1508 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS);
1509 }
1510 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1511 cbNewStack = (uint16_t)sizeof(uint32_t) * (4 + cbWords);
1512 else
1513 cbNewStack = (uint16_t)sizeof(uint16_t) * (4 + cbWords);
1514 }
1515 else
1516 {
1517 /* Just grab the new (NULL) SS descriptor. */
1518 /** @todo testcase: Check whether the zero GDT entry is actually loaded here
1519 * like we do... */
1520 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS);
1521 if (rcStrict != VINF_SUCCESS)
1522 return rcStrict;
1523
1524 cbNewStack = sizeof(uint64_t) * 4;
1525 }
1526
1527 /** @todo According to Intel, new stack is checked for enough space first,
1528 * then switched. According to AMD, the stack is switched first and
1529 * then pushes might fault!
1530 * NB: OS/2 Warp 3/4 actively relies on the fact that possible
1531 * incoming stack \#PF happens before actual stack switch. AMD is
1532 * either lying or implicitly assumes that new state is committed
1533 * only if and when an instruction doesn't fault.
1534 */
1535
1536 /** @todo According to AMD, CS is loaded first, then SS.
1537 * According to Intel, it's the other way around!?
1538 */
1539
1540 /** @todo Intel and AMD disagree on when exactly the CPL changes! */
1541
1542 /* Set the accessed bit before committing new SS. */
1543 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1544 {
1545 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
1546 if (rcStrict != VINF_SUCCESS)
1547 return rcStrict;
1548 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1549 }
1550
1551 /* Remember the old SS:rSP and their linear address. */
1552 uOldSS = pVCpu->cpum.GstCtx.ss.Sel;
1553 uOldRsp = pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig ? pVCpu->cpum.GstCtx.rsp : pVCpu->cpum.GstCtx.sp;
1554
1555 GCPtrParmWds = pVCpu->cpum.GstCtx.ss.u64Base + uOldRsp;
1556
1557 /* HACK ALERT! Probe if the write to the new stack will succeed. May #SS(NewSS)
1558 or #PF, the former is not implemented in this workaround. */
1559 /** @todo Proper fix callgate target stack exceptions. */
1560 /** @todo testcase: Cover callgates with partially or fully inaccessible
1561 * target stacks. */
1562 void *pvNewFrame;
1563 RTGCPTR GCPtrNewStack = X86DESC_BASE(&DescSS.Legacy) + uNewRsp - cbNewStack;
1564 rcStrict = iemMemMap(pVCpu, &pvNewFrame, cbNewStack, UINT8_MAX, GCPtrNewStack, IEM_ACCESS_SYS_RW, 0);
1565 if (rcStrict != VINF_SUCCESS)
1566 {
1567 Log(("BranchCallGate: Incoming stack (%04x:%08RX64) not accessible, rc=%Rrc\n", uNewSS, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
1568 return rcStrict;
1569 }
1570 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewFrame, IEM_ACCESS_SYS_RW);
1571 if (rcStrict != VINF_SUCCESS)
1572 {
1573 Log(("BranchCallGate: New stack probe unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1574 return rcStrict;
1575 }
1576
1577 /* Commit new SS:rSP. */
1578 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
1579 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
1580 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
1581 pVCpu->cpum.GstCtx.ss.u32Limit = X86DESC_LIMIT_G(&DescSS.Legacy);
1582 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
1583 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
1584 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1585 IEM_SET_CPL(pVCpu, uNewCSDpl); /** @todo Are the parameter words accessed using the new CPL or the old CPL? */
1586 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1587 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
1588
1589 /* At this point the stack access must not fail because new state was already committed. */
1590 /** @todo this can still fail due to SS.LIMIT not check. */
1591 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbNewStack,
1592 IEM_IS_LONG_MODE(pVCpu) ? 7
1593 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 3 : 1,
1594 &uPtrRet.pv, &uNewRsp);
1595 AssertMsgReturn(rcStrict == VINF_SUCCESS, ("BranchCallGate: New stack mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)),
1596 VERR_INTERNAL_ERROR_5);
1597
1598 if (!IEM_IS_LONG_MODE(pVCpu))
1599 {
1600 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1601 {
1602 if (cbWords)
1603 {
1604 /* Map the relevant chunk of the old stack. */
1605 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds,
1606 IEM_ACCESS_DATA_R, 0 /** @todo Can uNewCSDpl == 3? Then we need alignment mask here! */);
1607 if (rcStrict != VINF_SUCCESS)
1608 {
1609 Log(("BranchCallGate: Old stack mapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1610 return rcStrict;
1611 }
1612
1613 /* Copy the parameter (d)words. */
1614 for (int i = 0; i < cbWords; ++i)
1615 uPtrRet.pu32[2 + i] = uPtrParmWds.pu32[i];
1616
1617 /* Unmap the old stack. */
1618 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1619 if (rcStrict != VINF_SUCCESS)
1620 {
1621 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1622 return rcStrict;
1623 }
1624 }
1625
1626 /* Push the old CS:rIP. */
1627 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
1628 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1629
1630 /* Push the old SS:rSP. */
1631 uPtrRet.pu32[2 + cbWords + 0] = uOldRsp;
1632 uPtrRet.pu32[2 + cbWords + 1] = uOldSS;
1633 }
1634 else
1635 {
1636 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1637
1638 if (cbWords)
1639 {
1640 /* Map the relevant chunk of the old stack. */
1641 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds,
1642 IEM_ACCESS_DATA_R, 0 /** @todo Can uNewCSDpl == 3? Then we need alignment mask here! */);
1643 if (rcStrict != VINF_SUCCESS)
1644 {
1645 Log(("BranchCallGate: Old stack mapping (16-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1646 return rcStrict;
1647 }
1648
1649 /* Copy the parameter words. */
1650 for (int i = 0; i < cbWords; ++i)
1651 uPtrRet.pu16[2 + i] = uPtrParmWds.pu16[i];
1652
1653 /* Unmap the old stack. */
1654 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1655 if (rcStrict != VINF_SUCCESS)
1656 {
1657 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1658 return rcStrict;
1659 }
1660 }
1661
1662 /* Push the old CS:rIP. */
1663 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
1664 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
1665
1666 /* Push the old SS:rSP. */
1667 uPtrRet.pu16[2 + cbWords + 0] = uOldRsp;
1668 uPtrRet.pu16[2 + cbWords + 1] = uOldSS;
1669 }
1670 }
1671 else
1672 {
1673 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1674
1675 /* For 64-bit gates, no parameters are copied. Just push old SS:rSP and CS:rIP. */
1676 uPtrRet.pu64[0] = pVCpu->cpum.GstCtx.rip + cbInstr;
1677 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1678 uPtrRet.pu64[2] = uOldRsp;
1679 uPtrRet.pu64[3] = uOldSS; /** @todo Testcase: What is written to the high words when pushing SS? */
1680 }
1681
1682 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
1683 if (rcStrict != VINF_SUCCESS)
1684 {
1685 Log(("BranchCallGate: New stack unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1686 return rcStrict;
1687 }
1688
1689 /* Chop the high bits off if 16-bit gate (Intel says so). */
1690 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1691 uNewRip = (uint16_t)uNewRip;
1692
1693 /* Limit / canonical check. */
1694 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1695 if (!IEM_IS_LONG_MODE(pVCpu))
1696 {
1697 if (uNewRip > cbLimit)
1698 {
1699 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1700 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1701 }
1702 u64Base = X86DESC_BASE(&DescCS.Legacy);
1703 }
1704 else
1705 {
1706 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1707 if (!IEM_IS_CANONICAL(uNewRip))
1708 {
1709 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1710 return iemRaiseNotCanonical(pVCpu);
1711 }
1712 u64Base = 0;
1713 }
1714
1715 /*
1716 * Now set the accessed bit before
1717 * writing the return address to the stack and committing the result into
1718 * CS, CSHID and RIP.
1719 */
1720 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1721 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1722 {
1723 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1724 if (rcStrict != VINF_SUCCESS)
1725 return rcStrict;
1726 /** @todo check what VT-x and AMD-V does. */
1727 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1728 }
1729
1730 /* Commit new CS:rIP. */
1731 pVCpu->cpum.GstCtx.rip = uNewRip;
1732 pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1733 pVCpu->cpum.GstCtx.cs.Sel |= IEM_GET_CPL(pVCpu);
1734 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1735 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1736 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1737 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1738 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1739 }
1740 else
1741 {
1742 /* Same privilege. */
1743 /** @todo This is very similar to regular far calls; merge! */
1744
1745 /* Check stack first - may #SS(0). */
1746 /** @todo check how gate size affects pushing of CS! Does callf 16:32 in
1747 * 16-bit code cause a two or four byte CS to be pushed? */
1748 rcStrict = iemMemStackPushBeginSpecial(pVCpu,
1749 IEM_IS_LONG_MODE(pVCpu) ? 8+8
1750 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 4+4 : 2+2,
1751 IEM_IS_LONG_MODE(pVCpu) ? 7
1752 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 3 : 2,
1753 &uPtrRet.pv, &uNewRsp);
1754 if (rcStrict != VINF_SUCCESS)
1755 return rcStrict;
1756
1757 /* Chop the high bits off if 16-bit gate (Intel says so). */
1758 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1759 uNewRip = (uint16_t)uNewRip;
1760
1761 /* Limit / canonical check. */
1762 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1763 if (!IEM_IS_LONG_MODE(pVCpu))
1764 {
1765 if (uNewRip > cbLimit)
1766 {
1767 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1768 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1769 }
1770 u64Base = X86DESC_BASE(&DescCS.Legacy);
1771 }
1772 else
1773 {
1774 if (!IEM_IS_CANONICAL(uNewRip))
1775 {
1776 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1777 return iemRaiseNotCanonical(pVCpu);
1778 }
1779 u64Base = 0;
1780 }
1781
1782 /*
1783 * Now set the accessed bit before
1784 * writing the return address to the stack and committing the result into
1785 * CS, CSHID and RIP.
1786 */
1787 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1788 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1789 {
1790 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1791 if (rcStrict != VINF_SUCCESS)
1792 return rcStrict;
1793 /** @todo check what VT-x and AMD-V does. */
1794 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1795 }
1796
1797 /* stack */
1798 if (!IEM_IS_LONG_MODE(pVCpu))
1799 {
1800 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1801 {
1802 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
1803 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1804 }
1805 else
1806 {
1807 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1808 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
1809 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
1810 }
1811 }
1812 else
1813 {
1814 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1815 uPtrRet.pu64[0] = pVCpu->cpum.GstCtx.rip + cbInstr;
1816 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1817 }
1818
1819 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
1820 if (rcStrict != VINF_SUCCESS)
1821 return rcStrict;
1822
1823 /* commit */
1824 pVCpu->cpum.GstCtx.rip = uNewRip;
1825 pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1826 pVCpu->cpum.GstCtx.cs.Sel |= IEM_GET_CPL(pVCpu);
1827 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1828 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1829 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1830 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1831 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1832 }
1833 }
1834 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
1835
1836 iemRecalcExecModeAndCplFlags(pVCpu);
1837
1838/** @todo single stepping */
1839
1840 /* Flush the prefetch buffer. */
1841 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
1842 return VINF_SUCCESS;
1843#endif /* IEM_IMPLEMENTS_CALLGATE */
1844}
1845
1846
1847/**
1848 * Implements far jumps and calls thru system selectors.
1849 *
1850 * @returns VBox strict status code.
1851 * @param pVCpu The cross context virtual CPU structure of the
1852 * calling thread.
1853 * @param cbInstr The current instruction length.
1854 * @param uSel The selector.
1855 * @param enmBranch The kind of branching we're performing.
1856 * @param enmEffOpSize The effective operand size.
1857 * @param pDesc The descriptor corresponding to @a uSel.
1858 */
1859static VBOXSTRICTRC iemCImpl_BranchSysSel(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uSel, IEMBRANCH enmBranch,
1860 IEMMODE enmEffOpSize, PIEMSELDESC pDesc)
1861{
1862 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1863 Assert((uSel & X86_SEL_MASK_OFF_RPL));
1864 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1865
1866 if (IEM_IS_LONG_MODE(pVCpu))
1867 switch (pDesc->Legacy.Gen.u4Type)
1868 {
1869 case AMD64_SEL_TYPE_SYS_CALL_GATE:
1870 return iemCImpl_BranchCallGate(pVCpu, cbInstr, uSel, enmBranch, enmEffOpSize, pDesc);
1871
1872 default:
1873 case AMD64_SEL_TYPE_SYS_LDT:
1874 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
1875 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
1876 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
1877 case AMD64_SEL_TYPE_SYS_INT_GATE:
1878 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1879 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1880 }
1881
1882 switch (pDesc->Legacy.Gen.u4Type)
1883 {
1884 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1885 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1886 return iemCImpl_BranchCallGate(pVCpu, cbInstr, uSel, enmBranch, enmEffOpSize, pDesc);
1887
1888 case X86_SEL_TYPE_SYS_TASK_GATE:
1889 return iemCImpl_BranchTaskGate(pVCpu, cbInstr, uSel, enmBranch, enmEffOpSize, pDesc);
1890
1891 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1892 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1893 return iemCImpl_BranchTaskSegment(pVCpu, cbInstr, uSel, enmBranch, enmEffOpSize, pDesc);
1894
1895 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1896 Log(("branch %04x -> busy 286 TSS\n", uSel));
1897 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1898
1899 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1900 Log(("branch %04x -> busy 386 TSS\n", uSel));
1901 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1902
1903 default:
1904 case X86_SEL_TYPE_SYS_LDT:
1905 case X86_SEL_TYPE_SYS_286_INT_GATE:
1906 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1907 case X86_SEL_TYPE_SYS_386_INT_GATE:
1908 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1909 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1910 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1911 }
1912}
1913
1914
1915/**
1916 * Implements far jumps.
1917 *
1918 * @param uSel The selector.
1919 * @param offSeg The segment offset.
1920 * @param enmEffOpSize The effective operand size.
1921 */
1922IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1923{
1924 NOREF(cbInstr);
1925 Assert(offSeg <= UINT32_MAX || (!IEM_IS_GUEST_CPU_AMD(pVCpu) && IEM_IS_64BIT_CODE(pVCpu)));
1926
1927 /*
1928 * Real mode and V8086 mode are easy. The only snag seems to be that
1929 * CS.limit doesn't change and the limit check is done against the current
1930 * limit.
1931 */
1932 /** @todo Robert Collins claims (The Segment Descriptor Cache, DDJ August
1933 * 1998) that up to and including the Intel 486, far control
1934 * transfers in real mode set default CS attributes (0x93) and also
1935 * set a 64K segment limit. Starting with the Pentium, the
1936 * attributes and limit are left alone but the access rights are
1937 * ignored. We only implement the Pentium+ behavior.
1938 * */
1939 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
1940 {
1941 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1942 if (offSeg > pVCpu->cpum.GstCtx.cs.u32Limit)
1943 {
1944 Log(("iemCImpl_FarJmp: 16-bit limit\n"));
1945 return iemRaiseGeneralProtectionFault0(pVCpu);
1946 }
1947
1948 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
1949 pVCpu->cpum.GstCtx.rip = offSeg;
1950 else
1951 pVCpu->cpum.GstCtx.rip = offSeg & UINT16_MAX;
1952 pVCpu->cpum.GstCtx.cs.Sel = uSel;
1953 pVCpu->cpum.GstCtx.cs.ValidSel = uSel;
1954 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1955 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uSel << 4;
1956
1957 /* Update the FLAT 32-bit mode flag, if we're in 32-bit unreal mode (unlikely): */
1958 if (RT_LIKELY(!IEM_IS_32BIT_CODE(pVCpu)))
1959 { /* likely */ }
1960 else if (uSel != 0)
1961 pVCpu->iem.s.fExec &= ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK;
1962 else
1963 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK)
1964 | iemCalc32BitFlatIndicator(pVCpu);
1965
1966 return iemRegFinishClearingRF(pVCpu);
1967 }
1968
1969 /*
1970 * Protected mode. Need to parse the specified descriptor...
1971 */
1972 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1973 {
1974 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1975 return iemRaiseGeneralProtectionFault0(pVCpu);
1976 }
1977
1978 /* Fetch the descriptor. */
1979 IEMSELDESC Desc;
1980 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
1981 if (rcStrict != VINF_SUCCESS)
1982 return rcStrict;
1983
1984 /* Is it there? */
1985 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
1986 {
1987 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1988 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
1989 }
1990
1991 /*
1992 * Deal with it according to its type. We do the standard code selectors
1993 * here and dispatch the system selectors to worker functions.
1994 */
1995 if (!Desc.Legacy.Gen.u1DescType)
1996 return iemCImpl_BranchSysSel(pVCpu, cbInstr, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
1997
1998 /* Only code segments. */
1999 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2000 {
2001 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
2002 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2003 }
2004
2005 /* L vs D. */
2006 if ( Desc.Legacy.Gen.u1Long
2007 && Desc.Legacy.Gen.u1DefBig
2008 && IEM_IS_LONG_MODE(pVCpu))
2009 {
2010 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
2011 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2012 }
2013
2014 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
2015 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2016 {
2017 if (IEM_GET_CPL(pVCpu) < Desc.Legacy.Gen.u2Dpl)
2018 {
2019 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
2020 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2021 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2022 }
2023 }
2024 else
2025 {
2026 if (IEM_GET_CPL(pVCpu) != Desc.Legacy.Gen.u2Dpl)
2027 {
2028 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2029 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2030 }
2031 if ((uSel & X86_SEL_RPL) > IEM_GET_CPL(pVCpu))
2032 {
2033 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), IEM_GET_CPL(pVCpu)));
2034 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2035 }
2036 }
2037
2038 /* Chop the high bits if 16-bit (Intel says so). */
2039 if (enmEffOpSize == IEMMODE_16BIT)
2040 offSeg &= UINT16_MAX;
2041
2042 /* Limit check and get the base. */
2043 uint64_t u64Base;
2044 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2045 if ( !Desc.Legacy.Gen.u1Long
2046 || !IEM_IS_LONG_MODE(pVCpu))
2047 {
2048 if (RT_LIKELY(offSeg <= cbLimit))
2049 u64Base = X86DESC_BASE(&Desc.Legacy);
2050 else
2051 {
2052 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
2053 /** @todo Intel says this is \#GP(0)! */
2054 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2055 }
2056 }
2057 else
2058 u64Base = 0;
2059
2060 /*
2061 * Ok, everything checked out fine. Now set the accessed bit before
2062 * committing the result into CS, CSHID and RIP.
2063 */
2064 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2065 {
2066 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2067 if (rcStrict != VINF_SUCCESS)
2068 return rcStrict;
2069 /** @todo check what VT-x and AMD-V does. */
2070 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2071 }
2072
2073 /* commit */
2074 pVCpu->cpum.GstCtx.rip = offSeg;
2075 pVCpu->cpum.GstCtx.cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
2076 pVCpu->cpum.GstCtx.cs.Sel |= IEM_GET_CPL(pVCpu); /** @todo is this right for conforming segs? or in general? */
2077 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
2078 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2079 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2080 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2081 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2082
2083 /** @todo check if the hidden bits are loaded correctly for 64-bit
2084 * mode. */
2085
2086 iemRecalcExecModeAndCplFlags(pVCpu);
2087
2088 /* Flush the prefetch buffer. */
2089 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
2090
2091 return iemRegFinishClearingRF(pVCpu);
2092}
2093
2094
2095/**
2096 * Implements far calls.
2097 *
2098 * This very similar to iemCImpl_FarJmp.
2099 *
2100 * @param uSel The selector.
2101 * @param offSeg The segment offset.
2102 * @param enmEffOpSize The operand size (in case we need it).
2103 */
2104IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
2105{
2106 VBOXSTRICTRC rcStrict;
2107 uint64_t uNewRsp;
2108 RTPTRUNION uPtrRet;
2109
2110 /*
2111 * Real mode and V8086 mode are easy. The only snag seems to be that
2112 * CS.limit doesn't change and the limit check is done against the current
2113 * limit.
2114 */
2115 /** @todo See comment for similar code in iemCImpl_FarJmp */
2116 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
2117 {
2118 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
2119
2120 /* Check stack first - may #SS(0). */
2121 rcStrict = iemMemStackPushBeginSpecial(pVCpu, enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
2122 enmEffOpSize == IEMMODE_32BIT ? 3 : 1,
2123 &uPtrRet.pv, &uNewRsp);
2124 if (rcStrict != VINF_SUCCESS)
2125 return rcStrict;
2126
2127 /* Check the target address range. */
2128/** @todo this must be wrong! Write unreal mode tests! */
2129 if (offSeg > UINT32_MAX)
2130 return iemRaiseGeneralProtectionFault0(pVCpu);
2131
2132 /* Everything is fine, push the return address. */
2133 if (enmEffOpSize == IEMMODE_16BIT)
2134 {
2135 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
2136 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
2137 }
2138 else
2139 {
2140 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
2141 uPtrRet.pu16[2] = pVCpu->cpum.GstCtx.cs.Sel;
2142 }
2143 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
2144 if (rcStrict != VINF_SUCCESS)
2145 return rcStrict;
2146
2147 /* Branch. */
2148 pVCpu->cpum.GstCtx.rip = offSeg;
2149 pVCpu->cpum.GstCtx.cs.Sel = uSel;
2150 pVCpu->cpum.GstCtx.cs.ValidSel = uSel;
2151 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2152 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uSel << 4;
2153
2154 return iemRegFinishClearingRF(pVCpu);
2155 }
2156
2157 /*
2158 * Protected mode. Need to parse the specified descriptor...
2159 */
2160 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2161 {
2162 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
2163 return iemRaiseGeneralProtectionFault0(pVCpu);
2164 }
2165
2166 /* Fetch the descriptor. */
2167 IEMSELDESC Desc;
2168 rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
2169 if (rcStrict != VINF_SUCCESS)
2170 return rcStrict;
2171
2172 /*
2173 * Deal with it according to its type. We do the standard code selectors
2174 * here and dispatch the system selectors to worker functions.
2175 */
2176 if (!Desc.Legacy.Gen.u1DescType)
2177 return iemCImpl_BranchSysSel(pVCpu, cbInstr, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
2178
2179 /* Only code segments. */
2180 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2181 {
2182 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
2183 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2184 }
2185
2186 /* L vs D. */
2187 if ( Desc.Legacy.Gen.u1Long
2188 && Desc.Legacy.Gen.u1DefBig
2189 && IEM_IS_LONG_MODE(pVCpu))
2190 {
2191 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
2192 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2193 }
2194
2195 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
2196 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2197 {
2198 if (IEM_GET_CPL(pVCpu) < Desc.Legacy.Gen.u2Dpl)
2199 {
2200 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
2201 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2202 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2203 }
2204 }
2205 else
2206 {
2207 if (IEM_GET_CPL(pVCpu) != Desc.Legacy.Gen.u2Dpl)
2208 {
2209 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2210 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2211 }
2212 if ((uSel & X86_SEL_RPL) > IEM_GET_CPL(pVCpu))
2213 {
2214 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), IEM_GET_CPL(pVCpu)));
2215 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2216 }
2217 }
2218
2219 /* Is it there? */
2220 if (!Desc.Legacy.Gen.u1Present)
2221 {
2222 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
2223 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
2224 }
2225
2226 /* Check stack first - may #SS(0). */
2227 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
2228 * 16-bit code cause a two or four byte CS to be pushed? */
2229 rcStrict = iemMemStackPushBeginSpecial(pVCpu,
2230 enmEffOpSize == IEMMODE_64BIT ? 8+8 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
2231 enmEffOpSize == IEMMODE_64BIT ? 7 : enmEffOpSize == IEMMODE_32BIT ? 3 : 1,
2232 &uPtrRet.pv, &uNewRsp);
2233 if (rcStrict != VINF_SUCCESS)
2234 return rcStrict;
2235
2236 /* Chop the high bits if 16-bit (Intel says so). */
2237 if (enmEffOpSize == IEMMODE_16BIT)
2238 offSeg &= UINT16_MAX;
2239
2240 /* Limit / canonical check. */
2241 uint64_t u64Base;
2242 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2243 if ( !Desc.Legacy.Gen.u1Long
2244 || !IEM_IS_LONG_MODE(pVCpu))
2245 {
2246 if (RT_LIKELY(offSeg <= cbLimit))
2247 u64Base = X86DESC_BASE(&Desc.Legacy);
2248 else
2249 {
2250 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
2251 /** @todo Intel says this is \#GP(0)! */
2252 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2253 }
2254 }
2255 else if (IEM_IS_CANONICAL(offSeg))
2256 u64Base = 0;
2257 else
2258 {
2259 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
2260 return iemRaiseNotCanonical(pVCpu);
2261 }
2262
2263 /*
2264 * Now set the accessed bit before
2265 * writing the return address to the stack and committing the result into
2266 * CS, CSHID and RIP.
2267 */
2268 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2269 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2270 {
2271 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2272 if (rcStrict != VINF_SUCCESS)
2273 return rcStrict;
2274 /** @todo check what VT-x and AMD-V does. */
2275 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2276 }
2277
2278 /* stack */
2279 if (enmEffOpSize == IEMMODE_16BIT)
2280 {
2281 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
2282 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
2283 }
2284 else if (enmEffOpSize == IEMMODE_32BIT)
2285 {
2286 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
2287 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
2288 }
2289 else
2290 {
2291 uPtrRet.pu64[0] = pVCpu->cpum.GstCtx.rip + cbInstr;
2292 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
2293 }
2294 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
2295 if (rcStrict != VINF_SUCCESS)
2296 return rcStrict;
2297
2298 /* commit */
2299 pVCpu->cpum.GstCtx.rip = offSeg;
2300 pVCpu->cpum.GstCtx.cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
2301 pVCpu->cpum.GstCtx.cs.Sel |= IEM_GET_CPL(pVCpu);
2302 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
2303 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2304 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2305 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2306 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2307
2308 /** @todo check if the hidden bits are loaded correctly for 64-bit
2309 * mode. */
2310
2311 iemRecalcExecDbgFlags(pVCpu);
2312
2313 /* Flush the prefetch buffer. */
2314 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
2315
2316 return iemRegFinishClearingRF(pVCpu);
2317}
2318
2319
2320/**
2321 * Implements retf.
2322 *
2323 * @param enmEffOpSize The effective operand size.
2324 * @param cbPop The amount of arguments to pop from the stack
2325 * (bytes).
2326 */
2327IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2328{
2329 VBOXSTRICTRC rcStrict;
2330 RTCPTRUNION uPtrFrame;
2331 RTUINT64U NewRsp;
2332 uint64_t uNewRip;
2333 uint16_t uNewCs;
2334 NOREF(cbInstr);
2335
2336 /*
2337 * Read the stack values first.
2338 */
2339 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
2340 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
2341 rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbRetPtr,
2342 enmEffOpSize == IEMMODE_16BIT ? 1 : enmEffOpSize == IEMMODE_32BIT ? 3 : 7,
2343 &uPtrFrame.pv, &NewRsp.u);
2344 if (rcStrict != VINF_SUCCESS)
2345 return rcStrict;
2346 if (enmEffOpSize == IEMMODE_16BIT)
2347 {
2348 uNewRip = uPtrFrame.pu16[0];
2349 uNewCs = uPtrFrame.pu16[1];
2350 }
2351 else if (enmEffOpSize == IEMMODE_32BIT)
2352 {
2353 uNewRip = uPtrFrame.pu32[0];
2354 uNewCs = uPtrFrame.pu16[2];
2355 }
2356 else
2357 {
2358 uNewRip = uPtrFrame.pu64[0];
2359 uNewCs = uPtrFrame.pu16[4];
2360 }
2361 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv);
2362 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2363 { /* extremely likely */ }
2364 else
2365 return rcStrict;
2366
2367 /*
2368 * Real mode and V8086 mode are easy.
2369 */
2370 /** @todo See comment for similar code in iemCImpl_FarJmp */
2371 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
2372 {
2373 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2374 /** @todo check how this is supposed to work if sp=0xfffe. */
2375
2376 /* Check the limit of the new EIP. */
2377 /** @todo Intel pseudo code only does the limit check for 16-bit
2378 * operands, AMD does not make any distinction. What is right? */
2379 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
2380 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2381
2382 /* commit the operation. */
2383 if (cbPop)
2384 iemRegAddToRspEx(pVCpu, &NewRsp, cbPop);
2385 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2386 pVCpu->cpum.GstCtx.rip = uNewRip;
2387 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2388 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2389 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2390 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uNewCs << 4;
2391 return iemRegFinishClearingRF(pVCpu);
2392 }
2393
2394 /*
2395 * Protected mode is complicated, of course.
2396 */
2397 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2398 {
2399 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
2400 return iemRaiseGeneralProtectionFault0(pVCpu);
2401 }
2402
2403 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2404
2405 /* Fetch the descriptor. */
2406 IEMSELDESC DescCs;
2407 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCs, uNewCs, X86_XCPT_GP);
2408 if (rcStrict != VINF_SUCCESS)
2409 return rcStrict;
2410
2411 /* Can only return to a code selector. */
2412 if ( !DescCs.Legacy.Gen.u1DescType
2413 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
2414 {
2415 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
2416 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
2417 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2418 }
2419
2420 /* L vs D. */
2421 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
2422 && DescCs.Legacy.Gen.u1DefBig
2423 && IEM_IS_LONG_MODE(pVCpu))
2424 {
2425 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
2426 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2427 }
2428
2429 /* DPL/RPL/CPL checks. */
2430 if ((uNewCs & X86_SEL_RPL) < IEM_GET_CPL(pVCpu))
2431 {
2432 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, IEM_GET_CPL(pVCpu)));
2433 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2434 }
2435
2436 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2437 {
2438 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
2439 {
2440 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
2441 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2442 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2443 }
2444 }
2445 else
2446 {
2447 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
2448 {
2449 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
2450 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2451 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2452 }
2453 }
2454
2455 /* Is it there? */
2456 if (!DescCs.Legacy.Gen.u1Present)
2457 {
2458 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
2459 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
2460 }
2461
2462 /*
2463 * Return to outer privilege? (We'll typically have entered via a call gate.)
2464 */
2465 if ((uNewCs & X86_SEL_RPL) != IEM_GET_CPL(pVCpu))
2466 {
2467 /* Read the outer stack pointer stored *after* the parameters. */
2468 rcStrict = iemMemStackPopContinueSpecial(pVCpu, cbPop /*off*/, cbRetPtr, &uPtrFrame.pv, NewRsp.u);
2469 if (rcStrict != VINF_SUCCESS)
2470 return rcStrict;
2471
2472 uint16_t uNewOuterSs;
2473 RTUINT64U NewOuterRsp;
2474 if (enmEffOpSize == IEMMODE_16BIT)
2475 {
2476 NewOuterRsp.u = uPtrFrame.pu16[0];
2477 uNewOuterSs = uPtrFrame.pu16[1];
2478 }
2479 else if (enmEffOpSize == IEMMODE_32BIT)
2480 {
2481 NewOuterRsp.u = uPtrFrame.pu32[0];
2482 uNewOuterSs = uPtrFrame.pu16[2];
2483 }
2484 else
2485 {
2486 NewOuterRsp.u = uPtrFrame.pu64[0];
2487 uNewOuterSs = uPtrFrame.pu16[4];
2488 }
2489 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv);
2490 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2491 { /* extremely likely */ }
2492 else
2493 return rcStrict;
2494
2495 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
2496 and read the selector. */
2497 IEMSELDESC DescSs;
2498 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
2499 {
2500 if ( !DescCs.Legacy.Gen.u1Long
2501 || (uNewOuterSs & X86_SEL_RPL) == 3)
2502 {
2503 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
2504 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2505 return iemRaiseGeneralProtectionFault0(pVCpu);
2506 }
2507 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
2508 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
2509 }
2510 else
2511 {
2512 /* Fetch the descriptor for the new stack segment. */
2513 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSs, uNewOuterSs, X86_XCPT_GP);
2514 if (rcStrict != VINF_SUCCESS)
2515 return rcStrict;
2516 }
2517
2518 /* Check that RPL of stack and code selectors match. */
2519 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
2520 {
2521 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2522 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2523 }
2524
2525 /* Must be a writable data segment. */
2526 if ( !DescSs.Legacy.Gen.u1DescType
2527 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2528 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2529 {
2530 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
2531 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
2532 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2533 }
2534
2535 /* L vs D. (Not mentioned by intel.) */
2536 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
2537 && DescSs.Legacy.Gen.u1DefBig
2538 && IEM_IS_LONG_MODE(pVCpu))
2539 {
2540 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
2541 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2542 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2543 }
2544
2545 /* DPL/RPL/CPL checks. */
2546 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2547 {
2548 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
2549 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
2550 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2551 }
2552
2553 /* Is it there? */
2554 if (!DescSs.Legacy.Gen.u1Present)
2555 {
2556 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2557 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
2558 }
2559
2560 /* Calc SS limit.*/
2561 uint64_t u64BaseSs;
2562 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
2563
2564 /* Is RIP canonical or within CS.limit? */
2565 uint64_t u64BaseCs;
2566 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2567
2568 /** @todo Testcase: Is this correct? */
2569 if ( DescCs.Legacy.Gen.u1Long
2570 && IEM_IS_LONG_MODE(pVCpu) )
2571 {
2572 if (!IEM_IS_CANONICAL(uNewRip))
2573 {
2574 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2575 return iemRaiseNotCanonical(pVCpu);
2576 }
2577 u64BaseCs = 0;
2578 u64BaseSs = 0;
2579 }
2580 else
2581 {
2582 if (uNewRip > cbLimitCs)
2583 {
2584 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
2585 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u, cbLimitCs));
2586 /** @todo Intel says this is \#GP(0)! */
2587 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2588 }
2589 u64BaseCs = X86DESC_BASE(&DescCs.Legacy);
2590 u64BaseSs = X86DESC_BASE(&DescSs.Legacy);
2591 }
2592
2593 /*
2594 * Now set the accessed bit before
2595 * writing the return address to the stack and committing the result into
2596 * CS, CSHID and RIP.
2597 */
2598 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
2599 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2600 {
2601 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
2602 if (rcStrict != VINF_SUCCESS)
2603 return rcStrict;
2604 /** @todo check what VT-x and AMD-V does. */
2605 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2606 }
2607 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
2608 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2609 {
2610 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewOuterSs);
2611 if (rcStrict != VINF_SUCCESS)
2612 return rcStrict;
2613 /** @todo check what VT-x and AMD-V does. */
2614 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2615 }
2616
2617 /* commit */
2618 if (enmEffOpSize == IEMMODE_16BIT)
2619 pVCpu->cpum.GstCtx.rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2620 else
2621 pVCpu->cpum.GstCtx.rip = uNewRip;
2622 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2623 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2624 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2625 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2626 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCs;
2627 pVCpu->cpum.GstCtx.cs.u64Base = u64BaseCs;
2628 pVCpu->cpum.GstCtx.ss.Sel = uNewOuterSs;
2629 pVCpu->cpum.GstCtx.ss.ValidSel = uNewOuterSs;
2630 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2631 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
2632 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs;
2633 pVCpu->cpum.GstCtx.ss.u64Base = u64BaseSs;
2634
2635 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.ds);
2636 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.es);
2637 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.fs);
2638 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.gs);
2639
2640 iemRecalcExecModeAndCplFlags(pVCpu); /* Affects iemRegAddToRspEx and the setting of RSP/SP below. */
2641
2642 if (cbPop)
2643 iemRegAddToRspEx(pVCpu, &NewOuterRsp, cbPop);
2644 if (IEM_IS_64BIT_CODE(pVCpu))
2645 pVCpu->cpum.GstCtx.rsp = NewOuterRsp.u;
2646 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2647 pVCpu->cpum.GstCtx.rsp = (uint32_t)NewOuterRsp.u;
2648 else
2649 pVCpu->cpum.GstCtx.sp = (uint16_t)NewOuterRsp.u;
2650
2651 iemRecalcExecModeAndCplFlags(pVCpu); /* Affects iemRegAddToRspEx and the setting of RSP/SP below. */
2652
2653 /** @todo check if the hidden bits are loaded correctly for 64-bit
2654 * mode. */
2655 }
2656 /*
2657 * Return to the same privilege level
2658 */
2659 else
2660 {
2661 /* Limit / canonical check. */
2662 uint64_t u64Base;
2663 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2664
2665 /** @todo Testcase: Is this correct? */
2666 bool f64BitCs = false;
2667 if ( DescCs.Legacy.Gen.u1Long
2668 && IEM_IS_LONG_MODE(pVCpu) )
2669 {
2670 if (!IEM_IS_CANONICAL(uNewRip))
2671 {
2672 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
2673 return iemRaiseNotCanonical(pVCpu);
2674 }
2675 u64Base = 0;
2676 f64BitCs = true;
2677 f64BitCs = true;
2678 }
2679 else
2680 {
2681 if (uNewRip > cbLimitCs)
2682 {
2683 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
2684 /** @todo Intel says this is \#GP(0)! */
2685 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2686 }
2687 u64Base = X86DESC_BASE(&DescCs.Legacy);
2688 }
2689
2690 /*
2691 * Now set the accessed bit before
2692 * writing the return address to the stack and committing the result into
2693 * CS, CSHID and RIP.
2694 */
2695 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2696 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2697 {
2698 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
2699 if (rcStrict != VINF_SUCCESS)
2700 return rcStrict;
2701 /** @todo check what VT-x and AMD-V does. */
2702 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2703 }
2704
2705 /* commit */
2706 if (cbPop)
2707/** @todo This cannot be right. We're using the old CS mode here, and iemRegAddToRspEx checks fExec. */
2708 iemRegAddToRspEx(pVCpu, &NewRsp, cbPop);
2709 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig || f64BitCs)
2710 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2711 else
2712 pVCpu->cpum.GstCtx.sp = (uint16_t)NewRsp.u;
2713 if (enmEffOpSize == IEMMODE_16BIT)
2714 pVCpu->cpum.GstCtx.rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2715 else
2716 pVCpu->cpum.GstCtx.rip = uNewRip;
2717 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2718 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2719 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2720 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2721 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCs;
2722 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2723 /** @todo check if the hidden bits are loaded correctly for 64-bit
2724 * mode. */
2725
2726 iemRecalcExecModeAndCplFlags(pVCpu);
2727 }
2728
2729 /* Flush the prefetch buffer. */
2730 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo use light flush for same privilege? */
2731
2732 return iemRegFinishClearingRF(pVCpu);
2733}
2734
2735
2736/**
2737 * Implements retn and retn imm16.
2738 *
2739 * We're doing this in C because of the \#GP that might be raised if the popped
2740 * program counter is out of bounds.
2741 *
2742 * The hope with this forced inline worker function, is that the compiler will
2743 * be clever enough to eliminate unused code for the constant enmEffOpSize and
2744 * maybe cbPop parameters.
2745 *
2746 * @param pVCpu The cross context virtual CPU structure of the
2747 * calling thread.
2748 * @param cbInstr The current instruction length.
2749 * @param enmEffOpSize The effective operand size. This is constant.
2750 * @param cbPop The amount of arguments to pop from the stack
2751 * (bytes). This can be constant (zero).
2752 */
2753DECL_FORCE_INLINE(VBOXSTRICTRC) iemCImpl_ReturnNearCommon(PVMCPUCC pVCpu, uint8_t cbInstr, IEMMODE enmEffOpSize, uint16_t cbPop)
2754{
2755 /* Fetch the RSP from the stack. */
2756 VBOXSTRICTRC rcStrict;
2757 RTUINT64U NewRip;
2758 RTUINT64U NewRsp;
2759 NewRsp.u = pVCpu->cpum.GstCtx.rsp;
2760
2761 switch (enmEffOpSize)
2762 {
2763 case IEMMODE_16BIT:
2764 NewRip.u = 0;
2765 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRip.Words.w0, &NewRsp);
2766 break;
2767 case IEMMODE_32BIT:
2768 NewRip.u = 0;
2769 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRip.DWords.dw0, &NewRsp);
2770 break;
2771 case IEMMODE_64BIT:
2772 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRip.u, &NewRsp);
2773 break;
2774 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2775 }
2776 if (rcStrict != VINF_SUCCESS)
2777 return rcStrict;
2778
2779 /* Check the new RSP before loading it. */
2780 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
2781 * of it. The canonical test is performed here and for call. */
2782 if (enmEffOpSize != IEMMODE_64BIT)
2783 {
2784 if (RT_LIKELY(NewRip.DWords.dw0 <= pVCpu->cpum.GstCtx.cs.u32Limit))
2785 { /* likely */ }
2786 else
2787 {
2788 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pVCpu->cpum.GstCtx.cs.u32Limit));
2789 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2790 }
2791 }
2792 else
2793 {
2794 if (RT_LIKELY(IEM_IS_CANONICAL(NewRip.u)))
2795 { /* likely */ }
2796 else
2797 {
2798 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
2799 return iemRaiseNotCanonical(pVCpu);
2800 }
2801 }
2802
2803 /* Apply cbPop */
2804 if (cbPop)
2805 iemRegAddToRspEx(pVCpu, &NewRsp, cbPop);
2806
2807 /* Commit it. */
2808 pVCpu->cpum.GstCtx.rip = NewRip.u;
2809 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2810
2811 /* Flush the prefetch buffer. */
2812 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo only need a light flush here, don't we? We don't really need any flushing... */
2813 RT_NOREF(cbInstr);
2814
2815 return iemRegFinishClearingRF(pVCpu);
2816}
2817
2818
2819/**
2820 * Implements retn imm16 with 16-bit effective operand size.
2821 *
2822 * @param cbPop The amount of arguments to pop from the stack (bytes).
2823 */
2824IEM_CIMPL_DEF_1(iemCImpl_retn_iw_16, uint16_t, cbPop)
2825{
2826 return iemCImpl_ReturnNearCommon(pVCpu, cbInstr, IEMMODE_16BIT, cbPop);
2827}
2828
2829
2830/**
2831 * Implements retn imm16 with 32-bit effective operand size.
2832 *
2833 * @param cbPop The amount of arguments to pop from the stack (bytes).
2834 */
2835IEM_CIMPL_DEF_1(iemCImpl_retn_iw_32, uint16_t, cbPop)
2836{
2837 return iemCImpl_ReturnNearCommon(pVCpu, cbInstr, IEMMODE_32BIT, cbPop);
2838}
2839
2840
2841/**
2842 * Implements retn imm16 with 64-bit effective operand size.
2843 *
2844 * @param cbPop The amount of arguments to pop from the stack (bytes).
2845 */
2846IEM_CIMPL_DEF_1(iemCImpl_retn_iw_64, uint16_t, cbPop)
2847{
2848 return iemCImpl_ReturnNearCommon(pVCpu, cbInstr, IEMMODE_64BIT, cbPop);
2849}
2850
2851
2852/**
2853 * Implements retn with 16-bit effective operand size.
2854 */
2855IEM_CIMPL_DEF_0(iemCImpl_retn_16)
2856{
2857 return iemCImpl_ReturnNearCommon(pVCpu, cbInstr, IEMMODE_16BIT, 0);
2858}
2859
2860
2861/**
2862 * Implements retn with 32-bit effective operand size.
2863 */
2864IEM_CIMPL_DEF_0(iemCImpl_retn_32)
2865{
2866 return iemCImpl_ReturnNearCommon(pVCpu, cbInstr, IEMMODE_32BIT, 0);
2867}
2868
2869
2870/**
2871 * Implements retn with 64-bit effective operand size.
2872 */
2873IEM_CIMPL_DEF_0(iemCImpl_retn_64)
2874{
2875 return iemCImpl_ReturnNearCommon(pVCpu, cbInstr, IEMMODE_64BIT, 0);
2876}
2877
2878
2879/**
2880 * Implements enter.
2881 *
2882 * We're doing this in C because the instruction is insane, even for the
2883 * u8NestingLevel=0 case dealing with the stack is tedious.
2884 *
2885 * @param enmEffOpSize The effective operand size.
2886 * @param cbFrame Frame size.
2887 * @param cParameters Frame parameter count.
2888 */
2889IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
2890{
2891 /* Push RBP, saving the old value in TmpRbp. */
2892 RTUINT64U NewRsp; NewRsp.u = pVCpu->cpum.GstCtx.rsp;
2893 RTUINT64U TmpRbp; TmpRbp.u = pVCpu->cpum.GstCtx.rbp;
2894 RTUINT64U NewRbp;
2895 VBOXSTRICTRC rcStrict;
2896 if (enmEffOpSize == IEMMODE_64BIT)
2897 {
2898 rcStrict = iemMemStackPushU64Ex(pVCpu, TmpRbp.u, &NewRsp);
2899 NewRbp = NewRsp;
2900 }
2901 else if (enmEffOpSize == IEMMODE_32BIT)
2902 {
2903 rcStrict = iemMemStackPushU32Ex(pVCpu, TmpRbp.DWords.dw0, &NewRsp);
2904 NewRbp = NewRsp;
2905 }
2906 else
2907 {
2908 rcStrict = iemMemStackPushU16Ex(pVCpu, TmpRbp.Words.w0, &NewRsp);
2909 NewRbp = TmpRbp;
2910 NewRbp.Words.w0 = NewRsp.Words.w0;
2911 }
2912 if (rcStrict != VINF_SUCCESS)
2913 return rcStrict;
2914
2915 /* Copy the parameters (aka nesting levels by Intel). */
2916 cParameters &= 0x1f;
2917 if (cParameters > 0)
2918 {
2919 switch (enmEffOpSize)
2920 {
2921 case IEMMODE_16BIT:
2922 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2923 TmpRbp.DWords.dw0 -= 2;
2924 else
2925 TmpRbp.Words.w0 -= 2;
2926 do
2927 {
2928 uint16_t u16Tmp;
2929 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Tmp, &TmpRbp);
2930 if (rcStrict != VINF_SUCCESS)
2931 break;
2932 rcStrict = iemMemStackPushU16Ex(pVCpu, u16Tmp, &NewRsp);
2933 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2934 break;
2935
2936 case IEMMODE_32BIT:
2937 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2938 TmpRbp.DWords.dw0 -= 4;
2939 else
2940 TmpRbp.Words.w0 -= 4;
2941 do
2942 {
2943 uint32_t u32Tmp;
2944 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Tmp, &TmpRbp);
2945 if (rcStrict != VINF_SUCCESS)
2946 break;
2947 rcStrict = iemMemStackPushU32Ex(pVCpu, u32Tmp, &NewRsp);
2948 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2949 break;
2950
2951 case IEMMODE_64BIT:
2952 TmpRbp.u -= 8;
2953 do
2954 {
2955 uint64_t u64Tmp;
2956 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Tmp, &TmpRbp);
2957 if (rcStrict != VINF_SUCCESS)
2958 break;
2959 rcStrict = iemMemStackPushU64Ex(pVCpu, u64Tmp, &NewRsp);
2960 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2961 break;
2962
2963 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2964 }
2965 if (rcStrict != VINF_SUCCESS)
2966 return VINF_SUCCESS;
2967
2968 /* Push the new RBP */
2969 if (enmEffOpSize == IEMMODE_64BIT)
2970 rcStrict = iemMemStackPushU64Ex(pVCpu, NewRbp.u, &NewRsp);
2971 else if (enmEffOpSize == IEMMODE_32BIT)
2972 rcStrict = iemMemStackPushU32Ex(pVCpu, NewRbp.DWords.dw0, &NewRsp);
2973 else
2974 rcStrict = iemMemStackPushU16Ex(pVCpu, NewRbp.Words.w0, &NewRsp);
2975 if (rcStrict != VINF_SUCCESS)
2976 return rcStrict;
2977
2978 }
2979
2980 /* Recalc RSP. */
2981 iemRegSubFromRspEx(pVCpu, &NewRsp, cbFrame);
2982
2983 /** @todo Should probe write access at the new RSP according to AMD. */
2984 /** @todo Should handle accesses to the VMX APIC-access page. */
2985
2986 /* Commit it. */
2987 pVCpu->cpum.GstCtx.rbp = NewRbp.u;
2988 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2989 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
2990}
2991
2992
2993
2994/**
2995 * Implements leave.
2996 *
2997 * We're doing this in C because messing with the stack registers is annoying
2998 * since they depends on SS attributes.
2999 *
3000 * @param enmEffOpSize The effective operand size.
3001 */
3002IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
3003{
3004 /* Calculate the intermediate RSP from RBP and the stack attributes. */
3005 RTUINT64U NewRsp;
3006 if (IEM_IS_64BIT_CODE(pVCpu))
3007 NewRsp.u = pVCpu->cpum.GstCtx.rbp;
3008 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3009 NewRsp.u = pVCpu->cpum.GstCtx.ebp;
3010 else
3011 {
3012 /** @todo Check that LEAVE actually preserve the high EBP bits. */
3013 NewRsp.u = pVCpu->cpum.GstCtx.rsp;
3014 NewRsp.Words.w0 = pVCpu->cpum.GstCtx.bp;
3015 }
3016
3017 /* Pop RBP according to the operand size. */
3018 VBOXSTRICTRC rcStrict;
3019 RTUINT64U NewRbp;
3020 switch (enmEffOpSize)
3021 {
3022 case IEMMODE_16BIT:
3023 NewRbp.u = pVCpu->cpum.GstCtx.rbp;
3024 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRbp.Words.w0, &NewRsp);
3025 break;
3026 case IEMMODE_32BIT:
3027 NewRbp.u = 0;
3028 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRbp.DWords.dw0, &NewRsp);
3029 break;
3030 case IEMMODE_64BIT:
3031 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRbp.u, &NewRsp);
3032 break;
3033 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3034 }
3035 if (rcStrict != VINF_SUCCESS)
3036 return rcStrict;
3037
3038
3039 /* Commit it. */
3040 pVCpu->cpum.GstCtx.rbp = NewRbp.u;
3041 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
3042 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
3043}
3044
3045
3046/**
3047 * Implements int3 and int XX.
3048 *
3049 * @param u8Int The interrupt vector number.
3050 * @param enmInt The int instruction type.
3051 */
3052IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, IEMINT, enmInt)
3053{
3054 Assert(pVCpu->iem.s.cXcptRecursions == 0);
3055
3056 /*
3057 * We must check if this INT3 might belong to DBGF before raising a #BP.
3058 */
3059 if (u8Int == 3)
3060 {
3061 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3062 if (pVM->dbgf.ro.cEnabledInt3Breakpoints == 0)
3063 { /* likely: No vbox debugger breakpoints */ }
3064 else
3065 {
3066 VBOXSTRICTRC rcStrict = DBGFTrap03Handler(pVM, pVCpu, &pVCpu->cpum.GstCtx);
3067 Log(("iemCImpl_int: DBGFTrap03Handler -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
3068 if (rcStrict != VINF_EM_RAW_GUEST_TRAP)
3069 return iemSetPassUpStatus(pVCpu, rcStrict);
3070 }
3071 }
3072/** @todo single stepping */
3073 return iemRaiseXcptOrInt(pVCpu,
3074 cbInstr,
3075 u8Int,
3076 IEM_XCPT_FLAGS_T_SOFT_INT | enmInt,
3077 0,
3078 0);
3079}
3080
3081
3082/**
3083 * Implements iret for real mode and V8086 mode.
3084 *
3085 * @param enmEffOpSize The effective operand size.
3086 */
3087IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
3088{
3089 X86EFLAGS Efl;
3090 Efl.u = IEMMISC_GET_EFL(pVCpu);
3091 NOREF(cbInstr);
3092
3093 /*
3094 * iret throws an exception if VME isn't enabled.
3095 */
3096 if ( Efl.Bits.u1VM
3097 && Efl.Bits.u2IOPL != 3
3098 && !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME))
3099 return iemRaiseGeneralProtectionFault0(pVCpu);
3100
3101 /*
3102 * Do the stack bits, but don't commit RSP before everything checks
3103 * out right.
3104 */
3105 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3106 VBOXSTRICTRC rcStrict;
3107 RTCPTRUNION uFrame;
3108 uint16_t uNewCs;
3109 uint32_t uNewEip;
3110 uint32_t uNewFlags;
3111 uint64_t uNewRsp;
3112 if (enmEffOpSize == IEMMODE_32BIT)
3113 {
3114 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 1, &uFrame.pv, &uNewRsp);
3115 if (rcStrict != VINF_SUCCESS)
3116 return rcStrict;
3117 uNewEip = uFrame.pu32[0];
3118 if (uNewEip > UINT16_MAX)
3119 return iemRaiseGeneralProtectionFault0(pVCpu);
3120
3121 uNewCs = (uint16_t)uFrame.pu32[1];
3122 uNewFlags = uFrame.pu32[2];
3123 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3124 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
3125 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
3126 | X86_EFL_ID;
3127 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3128 uNewFlags &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3129 uNewFlags |= Efl.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
3130 }
3131 else
3132 {
3133 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, &uNewRsp);
3134 if (rcStrict != VINF_SUCCESS)
3135 return rcStrict;
3136 uNewEip = uFrame.pu16[0];
3137 uNewCs = uFrame.pu16[1];
3138 uNewFlags = uFrame.pu16[2];
3139 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3140 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
3141 uNewFlags |= Efl.u & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF);
3142 /** @todo The intel pseudo code does not indicate what happens to
3143 * reserved flags. We just ignore them. */
3144 /* Ancient CPU adjustments: See iemCImpl_popf. */
3145 if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286)
3146 uNewFlags &= ~(X86_EFL_NT | X86_EFL_IOPL);
3147 }
3148 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uFrame.pv);
3149 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3150 { /* extremely likely */ }
3151 else
3152 return rcStrict;
3153
3154 /** @todo Check how this is supposed to work if sp=0xfffe. */
3155 Log7(("iemCImpl_iret_real_v8086: uNewCs=%#06x uNewRip=%#010x uNewFlags=%#x uNewRsp=%#18llx\n",
3156 uNewCs, uNewEip, uNewFlags, uNewRsp));
3157
3158 /*
3159 * Check the limit of the new EIP.
3160 */
3161 /** @todo Only the AMD pseudo code check the limit here, what's
3162 * right? */
3163 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
3164 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
3165
3166 /*
3167 * V8086 checks and flag adjustments
3168 */
3169 if (Efl.Bits.u1VM)
3170 {
3171 if (Efl.Bits.u2IOPL == 3)
3172 {
3173 /* Preserve IOPL and clear RF. */
3174 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
3175 uNewFlags |= Efl.u & (X86_EFL_IOPL);
3176 }
3177 else if ( enmEffOpSize == IEMMODE_16BIT
3178 && ( !(uNewFlags & X86_EFL_IF)
3179 || !Efl.Bits.u1VIP )
3180 && !(uNewFlags & X86_EFL_TF) )
3181 {
3182 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
3183 uNewFlags &= ~X86_EFL_VIF;
3184 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
3185 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
3186 uNewFlags |= Efl.u & (X86_EFL_IF | X86_EFL_IOPL);
3187 }
3188 else
3189 return iemRaiseGeneralProtectionFault0(pVCpu);
3190 Log7(("iemCImpl_iret_real_v8086: u1VM=1: adjusted uNewFlags=%#x\n", uNewFlags));
3191 }
3192
3193 /*
3194 * Commit the operation.
3195 */
3196#ifdef DBGFTRACE_ENABLED
3197 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/rm %04x:%04x -> %04x:%04x %x %04llx",
3198 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, uNewCs, uNewEip, uNewFlags, uNewRsp);
3199#endif
3200 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3201 pVCpu->cpum.GstCtx.rip = uNewEip;
3202 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3203 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3204 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3205 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uNewCs << 4;
3206 /** @todo do we load attribs and limit as well? */
3207 Assert(uNewFlags & X86_EFL_1);
3208 IEMMISC_SET_EFL(pVCpu, uNewFlags);
3209
3210 /* Flush the prefetch buffer. */
3211 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo can do light flush in real mode at least */
3212
3213/** @todo single stepping */
3214 return VINF_SUCCESS;
3215}
3216
3217
3218/**
3219 * Loads a segment register when entering V8086 mode.
3220 *
3221 * @param pSReg The segment register.
3222 * @param uSeg The segment to load.
3223 */
3224static void iemCImplCommonV8086LoadSeg(PCPUMSELREG pSReg, uint16_t uSeg)
3225{
3226 pSReg->Sel = uSeg;
3227 pSReg->ValidSel = uSeg;
3228 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3229 pSReg->u64Base = (uint32_t)uSeg << 4;
3230 pSReg->u32Limit = 0xffff;
3231 pSReg->Attr.u = X86_SEL_TYPE_RW_ACC | RT_BIT(4) /*!sys*/ | RT_BIT(7) /*P*/ | (3 /*DPL*/ << 5); /* VT-x wants 0xf3 */
3232 /** @todo Testcase: Check if VT-x really needs this and what it does itself when
3233 * IRET'ing to V8086. */
3234}
3235
3236
3237/**
3238 * Implements iret for protected mode returning to V8086 mode.
3239 *
3240 * @param uNewEip The new EIP.
3241 * @param uNewCs The new CS.
3242 * @param uNewFlags The new EFLAGS.
3243 * @param uNewRsp The RSP after the initial IRET frame.
3244 *
3245 * @note This can only be a 32-bit iret du to the X86_EFL_VM position.
3246 */
3247IEM_CIMPL_DEF_4(iemCImpl_iret_prot_v8086, uint32_t, uNewEip, uint16_t, uNewCs, uint32_t, uNewFlags, uint64_t, uNewRsp)
3248{
3249 RT_NOREF_PV(cbInstr);
3250 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK);
3251
3252 /*
3253 * Pop the V8086 specific frame bits off the stack.
3254 */
3255 VBOXSTRICTRC rcStrict;
3256 RTCPTRUNION uFrame;
3257 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0 /*off*/, 24 /*cbMem*/, &uFrame.pv, uNewRsp);
3258 if (rcStrict != VINF_SUCCESS)
3259 return rcStrict;
3260 uint32_t uNewEsp = uFrame.pu32[0];
3261 uint16_t uNewSs = uFrame.pu32[1];
3262 uint16_t uNewEs = uFrame.pu32[2];
3263 uint16_t uNewDs = uFrame.pu32[3];
3264 uint16_t uNewFs = uFrame.pu32[4];
3265 uint16_t uNewGs = uFrame.pu32[5];
3266 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
3267 if (rcStrict != VINF_SUCCESS)
3268 return rcStrict;
3269
3270 /*
3271 * Commit the operation.
3272 */
3273 uNewFlags &= X86_EFL_LIVE_MASK;
3274 uNewFlags |= X86_EFL_RA1_MASK;
3275#ifdef DBGFTRACE_ENABLED
3276 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/p/v %04x:%08x -> %04x:%04x %x %04x:%04x",
3277 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp);
3278#endif
3279 Log7(("iemCImpl_iret_prot_v8086: %04x:%08x -> %04x:%04x %x %04x:%04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp));
3280
3281 IEMMISC_SET_EFL(pVCpu, uNewFlags);
3282 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.cs, uNewCs);
3283 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.ss, uNewSs);
3284 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.es, uNewEs);
3285 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.ds, uNewDs);
3286 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.fs, uNewFs);
3287 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.gs, uNewGs);
3288 pVCpu->cpum.GstCtx.rip = (uint16_t)uNewEip;
3289 pVCpu->cpum.GstCtx.rsp = uNewEsp; /** @todo check this out! */
3290 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
3291 | (3 << IEM_F_X86_CPL_SHIFT)
3292 | IEM_F_MODE_X86_16BIT_PROT_V86;
3293
3294 /* Flush the prefetch buffer. */
3295 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
3296
3297/** @todo single stepping */
3298 return VINF_SUCCESS;
3299}
3300
3301
3302/**
3303 * Implements iret for protected mode returning via a nested task.
3304 *
3305 * @param enmEffOpSize The effective operand size.
3306 */
3307IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
3308{
3309 Log7(("iemCImpl_iret_prot_NestedTask:\n"));
3310#ifndef IEM_IMPLEMENTS_TASKSWITCH
3311 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
3312#else
3313 RT_NOREF_PV(enmEffOpSize);
3314
3315 /*
3316 * Read the segment selector in the link-field of the current TSS.
3317 */
3318 RTSEL uSelRet;
3319 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &uSelRet, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base);
3320 if (rcStrict != VINF_SUCCESS)
3321 return rcStrict;
3322
3323 /*
3324 * Fetch the returning task's TSS descriptor from the GDT.
3325 */
3326 if (uSelRet & X86_SEL_LDT)
3327 {
3328 Log(("iret_prot_NestedTask TSS not in LDT. uSelRet=%04x -> #TS\n", uSelRet));
3329 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet);
3330 }
3331
3332 IEMSELDESC TssDesc;
3333 rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelRet, X86_XCPT_GP);
3334 if (rcStrict != VINF_SUCCESS)
3335 return rcStrict;
3336
3337 if (TssDesc.Legacy.Gate.u1DescType)
3338 {
3339 Log(("iret_prot_NestedTask Invalid TSS type. uSelRet=%04x -> #TS\n", uSelRet));
3340 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3341 }
3342
3343 if ( TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_286_TSS_BUSY
3344 && TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
3345 {
3346 Log(("iret_prot_NestedTask TSS is not busy. uSelRet=%04x DescType=%#x -> #TS\n", uSelRet, TssDesc.Legacy.Gate.u4Type));
3347 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3348 }
3349
3350 if (!TssDesc.Legacy.Gate.u1Present)
3351 {
3352 Log(("iret_prot_NestedTask TSS is not present. uSelRet=%04x -> #NP\n", uSelRet));
3353 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3354 }
3355
3356 uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
3357 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_IRET, uNextEip, 0 /* fFlags */, 0 /* uErr */,
3358 0 /* uCr2 */, uSelRet, &TssDesc);
3359#endif
3360}
3361
3362
3363/**
3364 * Implements iret for protected mode
3365 *
3366 * @param enmEffOpSize The effective operand size.
3367 */
3368IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
3369{
3370 NOREF(cbInstr);
3371 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3372
3373 /*
3374 * Nested task return.
3375 */
3376 if (pVCpu->cpum.GstCtx.eflags.Bits.u1NT)
3377 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
3378
3379 /*
3380 * Normal return.
3381 *
3382 * Do the stack bits, but don't commit RSP before everything checks
3383 * out right.
3384 */
3385 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3386 VBOXSTRICTRC rcStrict;
3387 RTCPTRUNION uFrame;
3388 uint16_t uNewCs;
3389 uint32_t uNewEip;
3390 uint32_t uNewFlags;
3391 uint64_t uNewRsp;
3392 if (enmEffOpSize == IEMMODE_32BIT)
3393 {
3394 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 3, &uFrame.pv, &uNewRsp);
3395 if (rcStrict != VINF_SUCCESS)
3396 return rcStrict;
3397 uNewEip = uFrame.pu32[0];
3398 uNewCs = (uint16_t)uFrame.pu32[1];
3399 uNewFlags = uFrame.pu32[2];
3400 }
3401 else
3402 {
3403 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, &uNewRsp);
3404 if (rcStrict != VINF_SUCCESS)
3405 return rcStrict;
3406 uNewEip = uFrame.pu16[0];
3407 uNewCs = uFrame.pu16[1];
3408 uNewFlags = uFrame.pu16[2];
3409 }
3410 rcStrict = iemMemStackPopDoneSpecial(pVCpu, (void *)uFrame.pv); /* don't use iemMemStackPopCommitSpecial here. */
3411 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3412 { /* extremely likely */ }
3413 else
3414 return rcStrict;
3415 Log7(("iemCImpl_iret_prot: uNewCs=%#06x uNewEip=%#010x uNewFlags=%#x uNewRsp=%#18llx uCpl=%u\n", uNewCs, uNewEip, uNewFlags, uNewRsp, IEM_GET_CPL(pVCpu)));
3416
3417 /*
3418 * We're hopefully not returning to V8086 mode...
3419 */
3420 if ( (uNewFlags & X86_EFL_VM)
3421 && IEM_GET_CPL(pVCpu) == 0)
3422 {
3423 Assert(enmEffOpSize == IEMMODE_32BIT);
3424 return IEM_CIMPL_CALL_4(iemCImpl_iret_prot_v8086, uNewEip, uNewCs, uNewFlags, uNewRsp);
3425 }
3426
3427 /*
3428 * Protected mode.
3429 */
3430 /* Read the CS descriptor. */
3431 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3432 {
3433 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
3434 return iemRaiseGeneralProtectionFault0(pVCpu);
3435 }
3436
3437 IEMSELDESC DescCS;
3438 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP);
3439 if (rcStrict != VINF_SUCCESS)
3440 {
3441 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
3442 return rcStrict;
3443 }
3444
3445 /* Must be a code descriptor. */
3446 if (!DescCS.Legacy.Gen.u1DescType)
3447 {
3448 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3449 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3450 }
3451 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3452 {
3453 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3454 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3455 }
3456
3457 /* Privilege checks. */
3458 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3459 {
3460 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3461 {
3462 Log(("iret %04x:%08x - RPL != DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3463 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3464 }
3465 }
3466 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3467 {
3468 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3469 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3470 }
3471 if ((uNewCs & X86_SEL_RPL) < IEM_GET_CPL(pVCpu))
3472 {
3473 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, IEM_GET_CPL(pVCpu)));
3474 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3475 }
3476
3477 /* Present? */
3478 if (!DescCS.Legacy.Gen.u1Present)
3479 {
3480 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
3481 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
3482 }
3483
3484 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3485
3486 /*
3487 * Return to outer level?
3488 */
3489 if ((uNewCs & X86_SEL_RPL) != IEM_GET_CPL(pVCpu))
3490 {
3491 uint16_t uNewSS;
3492 uint32_t uNewESP;
3493 if (enmEffOpSize == IEMMODE_32BIT)
3494 {
3495 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0/*off*/, 8 /*cbMem*/, &uFrame.pv, uNewRsp);
3496 if (rcStrict != VINF_SUCCESS)
3497 return rcStrict;
3498/** @todo We might be popping a 32-bit ESP from the IRET frame, but whether
3499 * 16-bit or 32-bit are being loaded into SP depends on the D/B
3500 * bit of the popped SS selector it turns out. */
3501 uNewESP = uFrame.pu32[0];
3502 uNewSS = (uint16_t)uFrame.pu32[1];
3503 }
3504 else
3505 {
3506 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0 /*off*/, 4 /*cbMem*/, &uFrame.pv, uNewRsp);
3507 if (rcStrict != VINF_SUCCESS)
3508 return rcStrict;
3509 uNewESP = uFrame.pu16[0];
3510 uNewSS = uFrame.pu16[1];
3511 }
3512 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
3513 if (rcStrict != VINF_SUCCESS)
3514 return rcStrict;
3515 Log7(("iemCImpl_iret_prot: uNewSS=%#06x uNewESP=%#010x\n", uNewSS, uNewESP));
3516
3517 /* Read the SS descriptor. */
3518 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3519 {
3520 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
3521 return iemRaiseGeneralProtectionFault0(pVCpu);
3522 }
3523
3524 IEMSELDESC DescSS;
3525 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_GP); /** @todo Correct exception? */
3526 if (rcStrict != VINF_SUCCESS)
3527 {
3528 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
3529 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
3530 return rcStrict;
3531 }
3532
3533 /* Privilege checks. */
3534 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3535 {
3536 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
3537 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3538 }
3539 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3540 {
3541 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
3542 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
3543 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3544 }
3545
3546 /* Must be a writeable data segment descriptor. */
3547 if (!DescSS.Legacy.Gen.u1DescType)
3548 {
3549 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
3550 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3551 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3552 }
3553 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3554 {
3555 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
3556 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3557 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3558 }
3559
3560 /* Present? */
3561 if (!DescSS.Legacy.Gen.u1Present)
3562 {
3563 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
3564 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS);
3565 }
3566
3567 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3568
3569 /* Check EIP. */
3570 if (uNewEip > cbLimitCS)
3571 {
3572 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
3573 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
3574 /** @todo Which is it, \#GP(0) or \#GP(sel)? */
3575 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3576 }
3577
3578 /*
3579 * Commit the changes, marking CS and SS accessed first since
3580 * that may fail.
3581 */
3582 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3583 {
3584 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3585 if (rcStrict != VINF_SUCCESS)
3586 return rcStrict;
3587 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3588 }
3589 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3590 {
3591 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3592 if (rcStrict != VINF_SUCCESS)
3593 return rcStrict;
3594 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3595 }
3596
3597 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3598 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3599 if (enmEffOpSize != IEMMODE_16BIT)
3600 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3601 if (IEM_GET_CPL(pVCpu) == 0)
3602 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3603 else if (IEM_GET_CPL(pVCpu) <= pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL)
3604 fEFlagsMask |= X86_EFL_IF;
3605 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3606 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3607 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu);
3608 fEFlagsNew &= ~fEFlagsMask;
3609 fEFlagsNew |= uNewFlags & fEFlagsMask;
3610#ifdef DBGFTRACE_ENABLED
3611 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up%u %04x:%08x -> %04x:%04x %x %04x:%04x",
3612 IEM_GET_CPL(pVCpu), uNewCs & X86_SEL_RPL, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3613 uNewCs, uNewEip, uNewFlags, uNewSS, uNewESP);
3614#endif
3615
3616 IEMMISC_SET_EFL(pVCpu, fEFlagsNew);
3617 pVCpu->cpum.GstCtx.rip = uNewEip;
3618 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3619 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3620 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3621 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3622 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3623 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3624
3625 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3626 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
3627 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3628 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3629 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs;
3630 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3631 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3632 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewESP;
3633 else
3634 pVCpu->cpum.GstCtx.rsp = uNewESP;
3635
3636 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.ds);
3637 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.es);
3638 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.fs);
3639 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.gs);
3640
3641 iemRecalcExecModeAndCplFlags(pVCpu);
3642
3643 /* Done! */
3644
3645 }
3646 /*
3647 * Return to the same level.
3648 */
3649 else
3650 {
3651 /* Check EIP. */
3652 if (uNewEip > cbLimitCS)
3653 {
3654 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
3655 /** @todo Which is it, \#GP(0) or \#GP(sel)? */
3656 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3657 }
3658
3659 /*
3660 * Commit the changes, marking CS first since it may fail.
3661 */
3662 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3663 {
3664 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3665 if (rcStrict != VINF_SUCCESS)
3666 return rcStrict;
3667 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3668 }
3669
3670 X86EFLAGS NewEfl;
3671 NewEfl.u = IEMMISC_GET_EFL(pVCpu);
3672 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3673 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3674 if (enmEffOpSize != IEMMODE_16BIT)
3675 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3676 if (IEM_GET_CPL(pVCpu) == 0)
3677 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3678 else if (IEM_GET_CPL(pVCpu) <= NewEfl.Bits.u2IOPL)
3679 fEFlagsMask |= X86_EFL_IF;
3680 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3681 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3682 NewEfl.u &= ~fEFlagsMask;
3683 NewEfl.u |= fEFlagsMask & uNewFlags;
3684#ifdef DBGFTRACE_ENABLED
3685 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up %04x:%08x -> %04x:%04x %x %04x:%04llx",
3686 IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3687 uNewCs, uNewEip, uNewFlags, pVCpu->cpum.GstCtx.ss.Sel, uNewRsp);
3688#endif
3689
3690 IEMMISC_SET_EFL(pVCpu, NewEfl.u);
3691 pVCpu->cpum.GstCtx.rip = uNewEip;
3692 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3693 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3694 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3695 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3696 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3697 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3698 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3699 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewRsp;
3700 else
3701 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3702
3703 iemRecalcExecModeAndCplFlags(pVCpu);
3704
3705 /* Done! */
3706 }
3707
3708 /* Flush the prefetch buffer. */
3709 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo may light flush if same ring? */
3710
3711/** @todo single stepping */
3712 return VINF_SUCCESS;
3713}
3714
3715
3716/**
3717 * Implements iret for long mode
3718 *
3719 * @param enmEffOpSize The effective operand size.
3720 */
3721IEM_CIMPL_DEF_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize)
3722{
3723 NOREF(cbInstr);
3724
3725 /*
3726 * Nested task return is not supported in long mode.
3727 */
3728 if (pVCpu->cpum.GstCtx.eflags.Bits.u1NT)
3729 {
3730 Log(("iretq with NT=1 (eflags=%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.eflags.u));
3731 return iemRaiseGeneralProtectionFault0(pVCpu);
3732 }
3733
3734 /*
3735 * Normal return.
3736 *
3737 * Do the stack bits, but don't commit RSP before everything checks
3738 * out right.
3739 */
3740 VBOXSTRICTRC rcStrict;
3741 RTCPTRUNION uFrame;
3742 uint64_t uNewRip;
3743 uint16_t uNewCs;
3744 uint16_t uNewSs;
3745 uint32_t uNewFlags;
3746 uint64_t uNewRsp;
3747 if (enmEffOpSize == IEMMODE_64BIT)
3748 {
3749 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*8, 7, &uFrame.pv, &uNewRsp);
3750 if (rcStrict != VINF_SUCCESS)
3751 return rcStrict;
3752 uNewRip = uFrame.pu64[0];
3753 uNewCs = (uint16_t)uFrame.pu64[1];
3754 uNewFlags = (uint32_t)uFrame.pu64[2];
3755 uNewRsp = uFrame.pu64[3];
3756 uNewSs = (uint16_t)uFrame.pu64[4];
3757 }
3758 else if (enmEffOpSize == IEMMODE_32BIT)
3759 {
3760 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*4, 3, &uFrame.pv, &uNewRsp);
3761 if (rcStrict != VINF_SUCCESS)
3762 return rcStrict;
3763 uNewRip = uFrame.pu32[0];
3764 uNewCs = (uint16_t)uFrame.pu32[1];
3765 uNewFlags = uFrame.pu32[2];
3766 uNewRsp = uFrame.pu32[3];
3767 uNewSs = (uint16_t)uFrame.pu32[4];
3768 }
3769 else
3770 {
3771 Assert(enmEffOpSize == IEMMODE_16BIT);
3772 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*2, 1, &uFrame.pv, &uNewRsp);
3773 if (rcStrict != VINF_SUCCESS)
3774 return rcStrict;
3775 uNewRip = uFrame.pu16[0];
3776 uNewCs = uFrame.pu16[1];
3777 uNewFlags = uFrame.pu16[2];
3778 uNewRsp = uFrame.pu16[3];
3779 uNewSs = uFrame.pu16[4];
3780 }
3781 rcStrict = iemMemStackPopDoneSpecial(pVCpu, (void *)uFrame.pv); /* don't use iemMemStackPopCommitSpecial here. */
3782 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3783 { /* extremely like */ }
3784 else
3785 return rcStrict;
3786 Log7(("iretq stack: cs:rip=%04x:%016RX64 rflags=%016RX64 ss:rsp=%04x:%016RX64\n", uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp));
3787
3788 /*
3789 * Check stuff.
3790 */
3791 /* Read the CS descriptor. */
3792 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3793 {
3794 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid CS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3795 return iemRaiseGeneralProtectionFault0(pVCpu);
3796 }
3797
3798 IEMSELDESC DescCS;
3799 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP);
3800 if (rcStrict != VINF_SUCCESS)
3801 {
3802 Log(("iret %04x:%016RX64/%04x:%016RX64 - rcStrict=%Rrc when fetching CS\n",
3803 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3804 return rcStrict;
3805 }
3806
3807 /* Must be a code descriptor. */
3808 if ( !DescCS.Legacy.Gen.u1DescType
3809 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3810 {
3811 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS is not a code segment T=%u T=%#xu -> #GP\n",
3812 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3813 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3814 }
3815
3816 /* Privilege checks. */
3817 uint8_t const uNewCpl = uNewCs & X86_SEL_RPL;
3818 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3819 {
3820 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3821 {
3822 Log(("iret %04x:%016RX64 - RPL != DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3823 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3824 }
3825 }
3826 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3827 {
3828 Log(("iret %04x:%016RX64 - RPL < DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3829 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3830 }
3831 if ((uNewCs & X86_SEL_RPL) < IEM_GET_CPL(pVCpu))
3832 {
3833 Log(("iret %04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, IEM_GET_CPL(pVCpu)));
3834 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3835 }
3836
3837 /* Present? */
3838 if (!DescCS.Legacy.Gen.u1Present)
3839 {
3840 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS not present -> #NP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3841 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
3842 }
3843
3844 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3845
3846 /* Read the SS descriptor. */
3847 IEMSELDESC DescSS;
3848 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3849 {
3850 if ( !DescCS.Legacy.Gen.u1Long
3851 || DescCS.Legacy.Gen.u1DefBig /** @todo exactly how does iret (and others) behave with u1Long=1 and u1DefBig=1? \#GP(sel)? */
3852 || uNewCpl > 2) /** @todo verify SS=0 impossible for ring-3. */
3853 {
3854 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid SS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3855 return iemRaiseGeneralProtectionFault0(pVCpu);
3856 }
3857 /* Make sure SS is sensible, marked as accessed etc. */
3858 iemMemFakeStackSelDesc(&DescSS, (uNewSs & X86_SEL_RPL));
3859 }
3860 else
3861 {
3862 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSs, X86_XCPT_GP); /** @todo Correct exception? */
3863 if (rcStrict != VINF_SUCCESS)
3864 {
3865 Log(("iret %04x:%016RX64/%04x:%016RX64 - %Rrc when fetching SS\n",
3866 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3867 return rcStrict;
3868 }
3869 }
3870
3871 /* Privilege checks. */
3872 if ((uNewSs & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3873 {
3874 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3875 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3876 }
3877
3878 uint32_t cbLimitSs;
3879 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3880 cbLimitSs = UINT32_MAX;
3881 else
3882 {
3883 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3884 {
3885 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.DPL (%d) != CS.RPL -> #GP\n",
3886 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u2Dpl));
3887 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3888 }
3889
3890 /* Must be a writeable data segment descriptor. */
3891 if (!DescSS.Legacy.Gen.u1DescType)
3892 {
3893 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS is system segment (%#x) -> #GP\n",
3894 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3895 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3896 }
3897 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3898 {
3899 Log(("iret %04x:%016RX64/%04x:%016RX64 - not writable data segment (%#x) -> #GP\n",
3900 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3901 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3902 }
3903
3904 /* Present? */
3905 if (!DescSS.Legacy.Gen.u1Present)
3906 {
3907 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS not present -> #SS\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3908 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSs);
3909 }
3910 cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3911 }
3912
3913 /* Check EIP. */
3914 if (DescCS.Legacy.Gen.u1Long)
3915 {
3916 if (!IEM_IS_CANONICAL(uNewRip))
3917 {
3918 Log(("iret %04x:%016RX64/%04x:%016RX64 -> RIP is not canonical -> #GP(0)\n",
3919 uNewCs, uNewRip, uNewSs, uNewRsp));
3920 return iemRaiseNotCanonical(pVCpu);
3921 }
3922/** @todo check the location of this... Testcase. */
3923 if (RT_LIKELY(!DescCS.Legacy.Gen.u1DefBig))
3924 { /* likely */ }
3925 else
3926 {
3927 Log(("iret %04x:%016RX64/%04x:%016RX64 -> both L and D are set -> #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3928 return iemRaiseGeneralProtectionFault0(pVCpu);
3929 }
3930 }
3931 else
3932 {
3933 if (uNewRip > cbLimitCS)
3934 {
3935 Log(("iret %04x:%016RX64/%04x:%016RX64 -> EIP is out of bounds (%#x) -> #GP(0)\n",
3936 uNewCs, uNewRip, uNewSs, uNewRsp, cbLimitCS));
3937 /** @todo Which is it, \#GP(0) or \#GP(sel)? */
3938 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3939 }
3940 }
3941
3942 /*
3943 * Commit the changes, marking CS and SS accessed first since
3944 * that may fail.
3945 */
3946 /** @todo where exactly are these actually marked accessed by a real CPU? */
3947 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3948 {
3949 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3950 if (rcStrict != VINF_SUCCESS)
3951 return rcStrict;
3952 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3953 }
3954 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3955 {
3956 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSs);
3957 if (rcStrict != VINF_SUCCESS)
3958 return rcStrict;
3959 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3960 }
3961
3962 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3963 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3964 if (enmEffOpSize != IEMMODE_16BIT)
3965 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3966 if (IEM_GET_CPL(pVCpu) == 0)
3967 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */
3968 else if (IEM_GET_CPL(pVCpu) <= pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL)
3969 fEFlagsMask |= X86_EFL_IF;
3970 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu);
3971 fEFlagsNew &= ~fEFlagsMask;
3972 fEFlagsNew |= uNewFlags & fEFlagsMask;
3973#ifdef DBGFTRACE_ENABLED
3974 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%ul%u %08llx -> %04x:%04llx %llx %04x:%04llx",
3975 IEM_GET_CPL(pVCpu), uNewCpl, pVCpu->cpum.GstCtx.rip, uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp);
3976#endif
3977
3978 IEMMISC_SET_EFL(pVCpu, fEFlagsNew);
3979 pVCpu->cpum.GstCtx.rip = uNewRip;
3980 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3981 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3982 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3983 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3984 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3985 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3986 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1Long || pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig)
3987 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3988 else
3989 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewRsp;
3990 pVCpu->cpum.GstCtx.ss.Sel = uNewSs;
3991 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs;
3992 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3993 {
3994 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3995 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_UNUSABLE | (uNewCpl << X86DESCATTR_DPL_SHIFT);
3996 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3997 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3998 Log2(("iretq new SS: NULL\n"));
3999 }
4000 else
4001 {
4002 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4003 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4004 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs;
4005 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4006 Log2(("iretq new SS: base=%#RX64 lim=%#x attr=%#x\n", pVCpu->cpum.GstCtx.ss.u64Base, pVCpu->cpum.GstCtx.ss.u32Limit, pVCpu->cpum.GstCtx.ss.Attr.u));
4007 }
4008
4009 if (IEM_GET_CPL(pVCpu) != uNewCpl)
4010 {
4011 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.ds);
4012 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.es);
4013 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.fs);
4014 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.gs);
4015 }
4016
4017 iemRecalcExecModeAndCplFlags(pVCpu);
4018
4019 /* Flush the prefetch buffer. */
4020 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo may light flush if the ring + mode doesn't change */
4021
4022/** @todo single stepping */
4023 return VINF_SUCCESS;
4024}
4025
4026
4027/**
4028 * Implements iret.
4029 *
4030 * @param enmEffOpSize The effective operand size.
4031 */
4032IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
4033{
4034 bool fBlockingNmi = CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx);
4035
4036 if (!IEM_IS_IN_GUEST(pVCpu))
4037 { /* probable */ }
4038#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4039 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4040 {
4041 /*
4042 * Record whether NMI (or virtual-NMI) blocking is in effect during the execution
4043 * of this IRET instruction. We need to provide this information as part of some
4044 * VM-exits.
4045 *
4046 * See Intel spec. 27.2.2 "Information for VM Exits Due to Vectored Events".
4047 */
4048 if (IEM_VMX_IS_PINCTLS_SET(pVCpu, VMX_PIN_CTLS_VIRT_NMI))
4049 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking;
4050 else
4051 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = fBlockingNmi;
4052
4053 /*
4054 * If "NMI exiting" is set, IRET does not affect blocking of NMIs.
4055 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
4056 */
4057 if (IEM_VMX_IS_PINCTLS_SET(pVCpu, VMX_PIN_CTLS_NMI_EXIT))
4058 fBlockingNmi = false;
4059
4060 /* Clear virtual-NMI blocking, if any, before causing any further exceptions. */
4061 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = false;
4062 }
4063#endif
4064 /*
4065 * The SVM nested-guest intercept for IRET takes priority over all exceptions,
4066 * The NMI is still held pending (which I assume means blocking of further NMIs
4067 * is in effect).
4068 *
4069 * See AMD spec. 15.9 "Instruction Intercepts".
4070 * See AMD spec. 15.21.9 "NMI Support".
4071 */
4072 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IRET))
4073 {
4074 Log(("iret: Guest intercept -> #VMEXIT\n"));
4075 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
4076 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IRET, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4077 }
4078
4079 /*
4080 * Clear NMI blocking, if any, before causing any further exceptions.
4081 * See Intel spec. 6.7.1 "Handling Multiple NMIs".
4082 */
4083 if (fBlockingNmi)
4084 CPUMClearInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
4085
4086 /*
4087 * Call a mode specific worker.
4088 */
4089 VBOXSTRICTRC rcStrict;
4090 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4091 rcStrict = IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
4092 else
4093 {
4094 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
4095 if (IEM_IS_64BIT_CODE(pVCpu))
4096 rcStrict = IEM_CIMPL_CALL_1(iemCImpl_iret_64bit, enmEffOpSize);
4097 else
4098 rcStrict = IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
4099 }
4100
4101#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4102 /*
4103 * Clear NMI unblocking IRET state with the completion of IRET.
4104 */
4105 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4106 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = false;
4107#endif
4108 return rcStrict;
4109}
4110
4111
4112static void iemLoadallSetSelector(PVMCPUCC pVCpu, uint8_t iSegReg, uint16_t uSel)
4113{
4114 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
4115
4116 pHid->Sel = uSel;
4117 pHid->ValidSel = uSel;
4118 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4119}
4120
4121
4122static void iemLoadall286SetDescCache(PVMCPUCC pVCpu, uint8_t iSegReg, uint8_t const *pbMem)
4123{
4124 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
4125
4126 /* The base is in the first three bytes. */
4127 pHid->u64Base = pbMem[0] + (pbMem[1] << 8) + (pbMem[2] << 16);
4128 /* The attributes are in the fourth byte. */
4129 pHid->Attr.u = pbMem[3];
4130 pHid->Attr.u &= ~(X86DESCATTR_L | X86DESCATTR_D); /* (just to be on the safe side) */
4131 /* The limit is in the last two bytes. */
4132 pHid->u32Limit = pbMem[4] + (pbMem[5] << 8);
4133}
4134
4135
4136/**
4137 * Implements 286 LOADALL (286 CPUs only).
4138 */
4139IEM_CIMPL_DEF_0(iemCImpl_loadall286)
4140{
4141 NOREF(cbInstr);
4142
4143 /* Data is loaded from a buffer at 800h. No checks are done on the
4144 * validity of loaded state.
4145 *
4146 * LOADALL only loads the internal CPU state, it does not access any
4147 * GDT, LDT, or similar tables.
4148 */
4149
4150 if (IEM_GET_CPL(pVCpu) != 0)
4151 {
4152 Log(("loadall286: CPL must be 0 not %u -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
4153 return iemRaiseGeneralProtectionFault0(pVCpu);
4154 }
4155
4156 uint8_t const *pbMem = NULL;
4157 uint16_t const *pa16Mem;
4158 uint8_t const *pa8Mem;
4159 RTGCPHYS GCPtrStart = 0x800; /* Fixed table location. */
4160 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pbMem, 0x66, UINT8_MAX, GCPtrStart, IEM_ACCESS_SYS_R, 0);
4161 if (rcStrict != VINF_SUCCESS)
4162 return rcStrict;
4163
4164 /* The MSW is at offset 0x06. */
4165 pa16Mem = (uint16_t const *)(pbMem + 0x06);
4166 /* Even LOADALL can't clear the MSW.PE bit, though it can set it. */
4167 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
4168 uNewCr0 |= *pa16Mem & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
4169 uint64_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
4170
4171 CPUMSetGuestCR0(pVCpu, uNewCr0);
4172 Assert(pVCpu->cpum.GstCtx.cr0 == uNewCr0);
4173
4174 /* Inform PGM if mode changed. */
4175 if ((uNewCr0 & X86_CR0_PE) != (uOldCr0 & X86_CR0_PE))
4176 {
4177 int rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
4178 AssertRCReturn(rc, rc);
4179 /* ignore informational status codes */
4180 }
4181 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
4182 false /* fForce */);
4183
4184 /* TR selector is at offset 0x16. */
4185 pa16Mem = (uint16_t const *)(pbMem + 0x16);
4186 pVCpu->cpum.GstCtx.tr.Sel = pa16Mem[0];
4187 pVCpu->cpum.GstCtx.tr.ValidSel = pa16Mem[0];
4188 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4189
4190 /* Followed by FLAGS... */
4191 pVCpu->cpum.GstCtx.eflags.u = pa16Mem[1] | X86_EFL_1;
4192 pVCpu->cpum.GstCtx.ip = pa16Mem[2]; /* ...and IP. */
4193
4194 /* LDT is at offset 0x1C. */
4195 pa16Mem = (uint16_t const *)(pbMem + 0x1C);
4196 pVCpu->cpum.GstCtx.ldtr.Sel = pa16Mem[0];
4197 pVCpu->cpum.GstCtx.ldtr.ValidSel = pa16Mem[0];
4198 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4199
4200 /* Segment registers are at offset 0x1E. */
4201 pa16Mem = (uint16_t const *)(pbMem + 0x1E);
4202 iemLoadallSetSelector(pVCpu, X86_SREG_DS, pa16Mem[0]);
4203 iemLoadallSetSelector(pVCpu, X86_SREG_SS, pa16Mem[1]);
4204 iemLoadallSetSelector(pVCpu, X86_SREG_CS, pa16Mem[2]);
4205 iemLoadallSetSelector(pVCpu, X86_SREG_ES, pa16Mem[3]);
4206
4207 /* GPRs are at offset 0x26. */
4208 pa16Mem = (uint16_t const *)(pbMem + 0x26);
4209 pVCpu->cpum.GstCtx.di = pa16Mem[0];
4210 pVCpu->cpum.GstCtx.si = pa16Mem[1];
4211 pVCpu->cpum.GstCtx.bp = pa16Mem[2];
4212 pVCpu->cpum.GstCtx.sp = pa16Mem[3];
4213 pVCpu->cpum.GstCtx.bx = pa16Mem[4];
4214 pVCpu->cpum.GstCtx.dx = pa16Mem[5];
4215 pVCpu->cpum.GstCtx.cx = pa16Mem[6];
4216 pVCpu->cpum.GstCtx.ax = pa16Mem[7];
4217
4218 /* Descriptor caches are at offset 0x36, 6 bytes per entry. */
4219 iemLoadall286SetDescCache(pVCpu, X86_SREG_ES, pbMem + 0x36);
4220 iemLoadall286SetDescCache(pVCpu, X86_SREG_CS, pbMem + 0x3C);
4221 iemLoadall286SetDescCache(pVCpu, X86_SREG_SS, pbMem + 0x42);
4222 iemLoadall286SetDescCache(pVCpu, X86_SREG_DS, pbMem + 0x48);
4223
4224 /* GDTR contents are at offset 0x4E, 6 bytes. */
4225 RTGCPHYS GCPtrBase;
4226 uint16_t cbLimit;
4227 pa8Mem = pbMem + 0x4E;
4228 /* NB: Fourth byte "should be zero"; we are ignoring it. */
4229 GCPtrBase = pa8Mem[0] + (pa8Mem[1] << 8) + (pa8Mem[2] << 16);
4230 cbLimit = pa8Mem[4] + (pa8Mem[5] << 8);
4231 CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit);
4232
4233 /* IDTR contents are at offset 0x5A, 6 bytes. */
4234 pa8Mem = pbMem + 0x5A;
4235 GCPtrBase = pa8Mem[0] + (pa8Mem[1] << 8) + (pa8Mem[2] << 16);
4236 cbLimit = pa8Mem[4] + (pa8Mem[5] << 8);
4237 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit);
4238
4239 Log(("LOADALL: GDTR:%08RX64/%04X, IDTR:%08RX64/%04X\n", pVCpu->cpum.GstCtx.gdtr.pGdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, pVCpu->cpum.GstCtx.idtr.pIdt, pVCpu->cpum.GstCtx.idtr.cbIdt));
4240 Log(("LOADALL: CS:%04X, CS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.cs.u64Base, pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.Attr.u));
4241 Log(("LOADALL: DS:%04X, DS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.ds.u64Base, pVCpu->cpum.GstCtx.ds.u32Limit, pVCpu->cpum.GstCtx.ds.Attr.u));
4242 Log(("LOADALL: ES:%04X, ES base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.es.Sel, pVCpu->cpum.GstCtx.es.u64Base, pVCpu->cpum.GstCtx.es.u32Limit, pVCpu->cpum.GstCtx.es.Attr.u));
4243 Log(("LOADALL: SS:%04X, SS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ss.u64Base, pVCpu->cpum.GstCtx.ss.u32Limit, pVCpu->cpum.GstCtx.ss.Attr.u));
4244 Log(("LOADALL: SI:%04X, DI:%04X, AX:%04X, BX:%04X, CX:%04X, DX:%04X\n", pVCpu->cpum.GstCtx.si, pVCpu->cpum.GstCtx.di, pVCpu->cpum.GstCtx.bx, pVCpu->cpum.GstCtx.bx, pVCpu->cpum.GstCtx.cx, pVCpu->cpum.GstCtx.dx));
4245
4246 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pbMem, IEM_ACCESS_SYS_R);
4247 if (rcStrict != VINF_SUCCESS)
4248 return rcStrict;
4249
4250 /*
4251 * The CPL may change and protected mode may change enabled. It is taken
4252 * from the "DPL fields of the SS and CS descriptor caches" but there is no
4253 * word as to what happens if those are not identical (probably bad things).
4254 */
4255 iemRecalcExecModeAndCplFlags(pVCpu);
4256 Assert(IEM_IS_16BIT_CODE(pVCpu));
4257
4258 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS | CPUM_CHANGED_IDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_TR | CPUM_CHANGED_LDTR);
4259
4260 /* Flush the prefetch buffer. */
4261 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4262
4263/** @todo single stepping */
4264 return rcStrict;
4265}
4266
4267
4268/**
4269 * Implements SYSCALL (AMD and Intel64).
4270 */
4271IEM_CIMPL_DEF_0(iemCImpl_syscall)
4272{
4273 /** @todo hack, LOADALL should be decoded as such on a 286. */
4274 if (RT_UNLIKELY(pVCpu->iem.s.uTargetCpu == IEMTARGETCPU_286))
4275 return iemCImpl_loadall286(pVCpu, cbInstr);
4276
4277 /*
4278 * Check preconditions.
4279 *
4280 * Note that CPUs described in the documentation may load a few odd values
4281 * into CS and SS than we allow here. This has yet to be checked on real
4282 * hardware.
4283 */
4284 if (!(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SCE))
4285 {
4286 Log(("syscall: Not enabled in EFER -> #UD\n"));
4287 return iemRaiseUndefinedOpcode(pVCpu);
4288 }
4289 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4290 {
4291 Log(("syscall: Protected mode is required -> #GP(0)\n"));
4292 return iemRaiseGeneralProtectionFault0(pVCpu);
4293 }
4294 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4295 {
4296 Log(("syscall: Only available in long mode on intel -> #UD\n"));
4297 return iemRaiseUndefinedOpcode(pVCpu);
4298 }
4299
4300 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS);
4301
4302 /** @todo verify RPL ignoring and CS=0xfff8 (i.e. SS == 0). */
4303 /** @todo what about LDT selectors? Shouldn't matter, really. */
4304 uint16_t uNewCs = (pVCpu->cpum.GstCtx.msrSTAR >> MSR_K6_STAR_SYSCALL_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
4305 uint16_t uNewSs = uNewCs + 8;
4306 if (uNewCs == 0 || uNewSs == 0)
4307 {
4308 /** @todo Neither Intel nor AMD document this check. */
4309 Log(("syscall: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
4310 return iemRaiseGeneralProtectionFault0(pVCpu);
4311 }
4312
4313 /* Long mode and legacy mode differs. */
4314 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4315 {
4316 uint64_t uNewRip = IEM_IS_64BIT_CODE(pVCpu) ? pVCpu->cpum.GstCtx.msrLSTAR : pVCpu->cpum.GstCtx. msrCSTAR;
4317
4318 /* This test isn't in the docs, but I'm not trusting the guys writing
4319 the MSRs to have validated the values as canonical like they should. */
4320 if (!IEM_IS_CANONICAL(uNewRip))
4321 {
4322 /** @todo Intel claims this can't happen because IA32_LSTAR MSR can't be written with non-canonical address. */
4323 Log(("syscall: New RIP not canonical -> #UD\n"));
4324 return iemRaiseUndefinedOpcode(pVCpu);
4325 }
4326
4327 /*
4328 * Commit it.
4329 */
4330 Log(("syscall: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, uNewRip));
4331 pVCpu->cpum.GstCtx.rcx = pVCpu->cpum.GstCtx.rip + cbInstr;
4332 pVCpu->cpum.GstCtx.rip = uNewRip;
4333
4334 pVCpu->cpum.GstCtx.rflags.u &= ~X86_EFL_RF;
4335 pVCpu->cpum.GstCtx.r11 = pVCpu->cpum.GstCtx.rflags.u;
4336 pVCpu->cpum.GstCtx.rflags.u &= ~pVCpu->cpum.GstCtx.msrSFMASK;
4337 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_RA1_MASK;
4338
4339 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
4340 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
4341
4342 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4343 | IEM_F_MODE_X86_64BIT;
4344 }
4345 else
4346 {
4347 /*
4348 * Commit it.
4349 */
4350 Log(("syscall: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, uNewCs, (uint32_t)(pVCpu->cpum.GstCtx.msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK)));
4351 pVCpu->cpum.GstCtx.rcx = pVCpu->cpum.GstCtx.eip + cbInstr;
4352 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK;
4353 pVCpu->cpum.GstCtx.rflags.u &= ~(X86_EFL_VM | X86_EFL_IF | X86_EFL_RF);
4354
4355 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
4356 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
4357
4358 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4359 | IEM_F_MODE_X86_32BIT_PROT
4360 | iemCalc32BitFlatIndicatorEsDs(pVCpu);
4361 }
4362 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
4363 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
4364 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4365 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4366 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4367
4368 pVCpu->cpum.GstCtx.ss.Sel = uNewSs;
4369 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs;
4370 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4371 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4372 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4373
4374 /* Flush the prefetch buffer. */
4375 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4376
4377/** @todo single step */
4378 return VINF_SUCCESS;
4379}
4380
4381
4382/**
4383 * Implements SYSRET (AMD and Intel64).
4384 *
4385 * @param enmEffOpSize The effective operand size.
4386 */
4387IEM_CIMPL_DEF_1(iemCImpl_sysret, IEMMODE, enmEffOpSize)
4388
4389{
4390 RT_NOREF_PV(cbInstr);
4391
4392 /*
4393 * Check preconditions.
4394 *
4395 * Note that CPUs described in the documentation may load a few odd values
4396 * into CS and SS than we allow here. This has yet to be checked on real
4397 * hardware.
4398 */
4399 if (!(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SCE))
4400 {
4401 Log(("sysret: Not enabled in EFER -> #UD\n"));
4402 return iemRaiseUndefinedOpcode(pVCpu);
4403 }
4404 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4405 {
4406 Log(("sysret: Only available in long mode on intel -> #UD\n"));
4407 return iemRaiseUndefinedOpcode(pVCpu);
4408 }
4409 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4410 {
4411 Log(("sysret: Protected mode is required -> #GP(0)\n"));
4412 return iemRaiseGeneralProtectionFault0(pVCpu);
4413 }
4414 if (IEM_GET_CPL(pVCpu) != 0)
4415 {
4416 Log(("sysret: CPL must be 0 not %u -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
4417 return iemRaiseGeneralProtectionFault0(pVCpu);
4418 }
4419
4420 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS);
4421
4422 /** @todo Does SYSRET verify CS != 0 and SS != 0? Neither is valid in ring-3. */
4423 uint16_t uNewCs = (pVCpu->cpum.GstCtx.msrSTAR >> MSR_K6_STAR_SYSRET_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
4424 uint16_t uNewSs = uNewCs + 8;
4425 if (enmEffOpSize == IEMMODE_64BIT)
4426 uNewCs += 16;
4427 if (uNewCs == 0 || uNewSs == 0)
4428 {
4429 Log(("sysret: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
4430 return iemRaiseGeneralProtectionFault0(pVCpu);
4431 }
4432
4433 /*
4434 * Commit it.
4435 */
4436 bool f32Bit = true;
4437 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4438 {
4439 if (enmEffOpSize == IEMMODE_64BIT)
4440 {
4441 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64 [r11=%#llx]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, pVCpu->cpum.GstCtx.rcx, pVCpu->cpum.GstCtx.r11));
4442 /* Note! We disregard intel manual regarding the RCX canonical
4443 check, ask intel+xen why AMD doesn't do it. */
4444 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rcx;
4445 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4446 | (3 << X86DESCATTR_DPL_SHIFT);
4447 f32Bit = false;
4448 }
4449 else
4450 {
4451 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%08RX32 [r11=%#llx]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.r11));
4452 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.ecx;
4453 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4454 | (3 << X86DESCATTR_DPL_SHIFT);
4455 }
4456 /** @todo testcase: See what kind of flags we can make SYSRET restore and
4457 * what it really ignores. RF and VM are hinted at being zero, by AMD.
4458 * Intel says: RFLAGS := (R11 & 3C7FD7H) | 2; */
4459 pVCpu->cpum.GstCtx.rflags.u = pVCpu->cpum.GstCtx.r11 & (X86_EFL_POPF_BITS | X86_EFL_VIF | X86_EFL_VIP);
4460 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_RA1_MASK;
4461 }
4462 else
4463 {
4464 Log(("sysret: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, uNewCs, pVCpu->cpum.GstCtx.ecx));
4465 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rcx;
4466 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_IF;
4467 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4468 | (3 << X86DESCATTR_DPL_SHIFT);
4469 }
4470 pVCpu->cpum.GstCtx.cs.Sel = uNewCs | 3;
4471 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs | 3;
4472 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4473 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4474 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4475
4476 pVCpu->cpum.GstCtx.ss.Sel = uNewSs | 3;
4477 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs | 3;
4478 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4479 /* The SS hidden bits remains unchanged says AMD. To that I say "Yeah, right!". */
4480 pVCpu->cpum.GstCtx.ss.Attr.u |= (3 << X86DESCATTR_DPL_SHIFT);
4481 /** @todo Testcase: verify that SS.u1Long and SS.u1DefBig are left unchanged
4482 * on sysret. */
4483 /** @todo intel documents SS.BASE and SS.LIMIT as being set as well as the
4484 * TYPE, S, DPL, P, B and G flag bits. */
4485
4486 if (!f32Bit)
4487 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4488 | (3 << IEM_F_X86_CPL_SHIFT)
4489 | IEM_F_MODE_X86_64BIT;
4490 else
4491 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4492 | (3 << IEM_F_X86_CPL_SHIFT)
4493 | IEM_F_MODE_X86_32BIT_PROT
4494 /** @todo sort out the SS.BASE/LIM/ATTR claim by AMD and maybe we can switch to
4495 * iemCalc32BitFlatIndicatorDsEs and move this up into the above branch. */
4496 | iemCalc32BitFlatIndicator(pVCpu);
4497
4498 /* Flush the prefetch buffer. */
4499 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4500
4501/** @todo single step */
4502 return VINF_SUCCESS;
4503}
4504
4505
4506/**
4507 * Implements SYSENTER (Intel, 32-bit AMD).
4508 */
4509IEM_CIMPL_DEF_0(iemCImpl_sysenter)
4510{
4511 RT_NOREF(cbInstr);
4512
4513 /*
4514 * Check preconditions.
4515 *
4516 * Note that CPUs described in the documentation may load a few odd values
4517 * into CS and SS than we allow here. This has yet to be checked on real
4518 * hardware.
4519 */
4520 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSysEnter)
4521 {
4522 Log(("sysenter: not supported -=> #UD\n"));
4523 return iemRaiseUndefinedOpcode(pVCpu);
4524 }
4525 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4526 {
4527 Log(("sysenter: Protected or long mode is required -> #GP(0)\n"));
4528 return iemRaiseGeneralProtectionFault0(pVCpu);
4529 }
4530 bool fIsLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
4531 if (IEM_IS_GUEST_CPU_AMD(pVCpu) && fIsLongMode)
4532 {
4533 Log(("sysenter: Only available in protected mode on AMD -> #UD\n"));
4534 return iemRaiseUndefinedOpcode(pVCpu);
4535 }
4536 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
4537 uint16_t uNewCs = pVCpu->cpum.GstCtx.SysEnter.cs;
4538 if ((uNewCs & X86_SEL_MASK_OFF_RPL) == 0)
4539 {
4540 Log(("sysenter: SYSENTER_CS = %#x -> #GP(0)\n", uNewCs));
4541 return iemRaiseGeneralProtectionFault0(pVCpu);
4542 }
4543
4544 /* This test isn't in the docs, it's just a safeguard against missing
4545 canonical checks when writing the registers. */
4546 if (RT_LIKELY( !fIsLongMode
4547 || ( IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.SysEnter.eip)
4548 && IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.SysEnter.esp))))
4549 { /* likely */ }
4550 else
4551 {
4552 Log(("sysenter: SYSENTER_EIP = %#RX64 or/and SYSENTER_ESP = %#RX64 not canonical -> #GP(0)\n",
4553 pVCpu->cpum.GstCtx.SysEnter.eip, pVCpu->cpum.GstCtx.SysEnter.esp));
4554 return iemRaiseUndefinedOpcode(pVCpu);
4555 }
4556
4557/** @todo Test: Sysenter from ring-0, ring-1 and ring-2. */
4558
4559 /*
4560 * Update registers and commit.
4561 */
4562 if (fIsLongMode)
4563 {
4564 Log(("sysenter: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip,
4565 pVCpu->cpum.GstCtx.rflags.u, uNewCs & X86_SEL_MASK_OFF_RPL, pVCpu->cpum.GstCtx.SysEnter.eip));
4566 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.SysEnter.eip;
4567 pVCpu->cpum.GstCtx.rsp = pVCpu->cpum.GstCtx.SysEnter.esp;
4568 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_L | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4569 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC;
4570 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4571 | IEM_F_MODE_X86_64BIT;
4572 }
4573 else
4574 {
4575 Log(("sysenter: %04x:%08RX32 [efl=%#llx] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs, (uint32_t)pVCpu->cpum.GstCtx.rip,
4576 pVCpu->cpum.GstCtx.rflags.u, uNewCs & X86_SEL_MASK_OFF_RPL, (uint32_t)pVCpu->cpum.GstCtx.SysEnter.eip));
4577 pVCpu->cpum.GstCtx.rip = (uint32_t)pVCpu->cpum.GstCtx.SysEnter.eip;
4578 pVCpu->cpum.GstCtx.rsp = (uint32_t)pVCpu->cpum.GstCtx.SysEnter.esp;
4579 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4580 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC;
4581 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4582 | IEM_F_MODE_X86_32BIT
4583 | iemCalc32BitFlatIndicatorEsDs(pVCpu);
4584 }
4585 pVCpu->cpum.GstCtx.cs.Sel = uNewCs & X86_SEL_MASK_OFF_RPL;
4586 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs & X86_SEL_MASK_OFF_RPL;
4587 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4588 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4589 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4590
4591 pVCpu->cpum.GstCtx.ss.Sel = (uNewCs & X86_SEL_MASK_OFF_RPL) + 8;
4592 pVCpu->cpum.GstCtx.ss.ValidSel = (uNewCs & X86_SEL_MASK_OFF_RPL) + 8;
4593 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4594 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4595 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4596 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_RW_ACC;
4597 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4598
4599 pVCpu->cpum.GstCtx.rflags.Bits.u1IF = 0;
4600 pVCpu->cpum.GstCtx.rflags.Bits.u1VM = 0;
4601 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
4602
4603 /* Flush the prefetch buffer. */
4604 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4605
4606/** @todo single stepping */
4607 return VINF_SUCCESS;
4608}
4609
4610
4611/**
4612 * Implements SYSEXIT (Intel, 32-bit AMD).
4613 *
4614 * @param enmEffOpSize The effective operand size.
4615 */
4616IEM_CIMPL_DEF_1(iemCImpl_sysexit, IEMMODE, enmEffOpSize)
4617{
4618 RT_NOREF(cbInstr);
4619
4620 /*
4621 * Check preconditions.
4622 *
4623 * Note that CPUs described in the documentation may load a few odd values
4624 * into CS and SS than we allow here. This has yet to be checked on real
4625 * hardware.
4626 */
4627 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSysEnter)
4628 {
4629 Log(("sysexit: not supported -=> #UD\n"));
4630 return iemRaiseUndefinedOpcode(pVCpu);
4631 }
4632 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4633 {
4634 Log(("sysexit: Protected or long mode is required -> #GP(0)\n"));
4635 return iemRaiseGeneralProtectionFault0(pVCpu);
4636 }
4637 bool fIsLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
4638 if (IEM_IS_GUEST_CPU_AMD(pVCpu) && fIsLongMode)
4639 {
4640 Log(("sysexit: Only available in protected mode on AMD -> #UD\n"));
4641 return iemRaiseUndefinedOpcode(pVCpu);
4642 }
4643 if (IEM_GET_CPL(pVCpu) != 0)
4644 {
4645 Log(("sysexit: CPL(=%u) != 0 -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
4646 return iemRaiseGeneralProtectionFault0(pVCpu);
4647 }
4648 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
4649 uint16_t uNewCs = pVCpu->cpum.GstCtx.SysEnter.cs;
4650 if ((uNewCs & X86_SEL_MASK_OFF_RPL) == 0)
4651 {
4652 Log(("sysexit: SYSENTER_CS = %#x -> #GP(0)\n", uNewCs));
4653 return iemRaiseGeneralProtectionFault0(pVCpu);
4654 }
4655
4656 /*
4657 * Update registers and commit.
4658 */
4659 if (enmEffOpSize == IEMMODE_64BIT)
4660 {
4661 Log(("sysexit: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip,
4662 pVCpu->cpum.GstCtx.rflags.u, (uNewCs | 3) + 32, pVCpu->cpum.GstCtx.rcx));
4663 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rdx;
4664 pVCpu->cpum.GstCtx.rsp = pVCpu->cpum.GstCtx.rcx;
4665 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_L | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4666 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC | (3 << X86DESCATTR_DPL_SHIFT);
4667 pVCpu->cpum.GstCtx.cs.Sel = (uNewCs | 3) + 32;
4668 pVCpu->cpum.GstCtx.cs.ValidSel = (uNewCs | 3) + 32;
4669 pVCpu->cpum.GstCtx.ss.Sel = (uNewCs | 3) + 40;
4670 pVCpu->cpum.GstCtx.ss.ValidSel = (uNewCs | 3) + 40;
4671
4672 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4673 | (3 << IEM_F_X86_CPL_SHIFT)
4674 | IEM_F_MODE_X86_64BIT;
4675 }
4676 else
4677 {
4678 Log(("sysexit: %04x:%08RX64 [efl=%#llx] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip,
4679 pVCpu->cpum.GstCtx.rflags.u, (uNewCs | 3) + 16, (uint32_t)pVCpu->cpum.GstCtx.edx));
4680 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.edx;
4681 pVCpu->cpum.GstCtx.rsp = pVCpu->cpum.GstCtx.ecx;
4682 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4683 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC | (3 << X86DESCATTR_DPL_SHIFT);
4684 pVCpu->cpum.GstCtx.cs.Sel = (uNewCs | 3) + 16;
4685 pVCpu->cpum.GstCtx.cs.ValidSel = (uNewCs | 3) + 16;
4686 pVCpu->cpum.GstCtx.ss.Sel = (uNewCs | 3) + 24;
4687 pVCpu->cpum.GstCtx.ss.ValidSel = (uNewCs | 3) + 24;
4688
4689 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4690 | (3 << IEM_F_X86_CPL_SHIFT)
4691 | IEM_F_MODE_X86_32BIT
4692 | iemCalc32BitFlatIndicatorEsDs(pVCpu);
4693 }
4694 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4695 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4696 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4697
4698 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4699 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4700 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4701 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_RW_ACC | (3 << X86DESCATTR_DPL_SHIFT);
4702 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4703 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
4704
4705/** @todo single stepping */
4706
4707 /* Flush the prefetch buffer. */
4708 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4709
4710 return VINF_SUCCESS;
4711}
4712
4713
4714/**
4715 * Completes a MOV SReg,XXX or POP SReg instruction.
4716 *
4717 * When not modifying SS or when we're already in an interrupt shadow we
4718 * can update RIP and finish the instruction the normal way.
4719 *
4720 * Otherwise, the MOV/POP SS interrupt shadow that we now enable will block
4721 * both TF and DBx events. The TF will be ignored while the DBx ones will
4722 * be delayed till the next instruction boundrary. For more details see
4723 * @sdmv3{077,200,6.8.3,Masking Exceptions and Interrupts When Switching Stacks}.
4724 */
4725DECLINLINE(VBOXSTRICTRC) iemCImpl_LoadSRegFinish(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iSegReg)
4726{
4727 if (iSegReg != X86_SREG_SS || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
4728 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
4729
4730 iemRegAddToRip(pVCpu, cbInstr);
4731 pVCpu->cpum.GstCtx.eflags.uBoth &= ~X86_EFL_RF; /* Shadow int isn't set and DRx is delayed, so only clear RF. */
4732 CPUMSetInInterruptShadowSs(&pVCpu->cpum.GstCtx);
4733
4734 return VINF_SUCCESS;
4735}
4736
4737
4738/**
4739 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
4740 *
4741 * @param pVCpu The cross context virtual CPU structure of the calling
4742 * thread.
4743 * @param iSegReg The segment register number (valid).
4744 * @param uSel The new selector value.
4745 */
4746static VBOXSTRICTRC iemCImpl_LoadSRegWorker(PVMCPUCC pVCpu, uint8_t iSegReg, uint16_t uSel)
4747{
4748 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
4749 uint16_t *pSel = iemSRegRef(pVCpu, iSegReg);
4750 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
4751
4752 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
4753
4754 /*
4755 * Real mode and V8086 mode are easy.
4756 */
4757 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4758 {
4759 *pSel = uSel;
4760 pHid->u64Base = (uint32_t)uSel << 4;
4761 pHid->ValidSel = uSel;
4762 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4763#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
4764 /** @todo Does the CPU actually load limits and attributes in the
4765 * real/V8086 mode segment load case? It doesn't for CS in far
4766 * jumps... Affects unreal mode. */
4767 pHid->u32Limit = 0xffff;
4768 pHid->Attr.u = 0;
4769 pHid->Attr.n.u1Present = 1;
4770 pHid->Attr.n.u1DescType = 1;
4771 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
4772 ? X86_SEL_TYPE_RW
4773 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
4774#endif
4775
4776 /* Update the FLAT 32-bit mode flag, if we're in 32-bit unreal mode (unlikely): */
4777 if (RT_LIKELY(!IEM_IS_32BIT_CODE(pVCpu)))
4778 { /* likely */ }
4779 else if (uSel != 0)
4780 pVCpu->iem.s.fExec &= ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK;
4781 else
4782 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK)
4783 | iemCalc32BitFlatIndicator(pVCpu);
4784 }
4785 /*
4786 * Protected / long mode - null segment.
4787 *
4788 * Check if it's a null segment selector value first, that's OK for DS, ES,
4789 * FS and GS. If not null, then we have to load and parse the descriptor.
4790 */
4791 else if (!(uSel & X86_SEL_MASK_OFF_RPL))
4792 {
4793 Assert(iSegReg != X86_SREG_CS); /** @todo testcase for \#UD on MOV CS, ax! */
4794 if (iSegReg == X86_SREG_SS)
4795 {
4796 /* In 64-bit kernel mode, the stack can be 0 because of the way
4797 interrupts are dispatched. AMD seems to have a slighly more
4798 relaxed relationship to SS.RPL than intel does. */
4799 /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? There is a testcase (bs-cpu-xcpt-1), but double check this! */
4800 if ( !IEM_IS_64BIT_CODE(pVCpu)
4801 || IEM_GET_CPL(pVCpu) > 2
4802 || ( uSel != IEM_GET_CPL(pVCpu)
4803 && !IEM_IS_GUEST_CPU_AMD(pVCpu)) )
4804 {
4805 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
4806 return iemRaiseGeneralProtectionFault0(pVCpu);
4807 }
4808 }
4809
4810 *pSel = uSel; /* Not RPL, remember :-) */
4811 iemHlpLoadNullDataSelectorProt(pVCpu, pHid, uSel);
4812 if (iSegReg == X86_SREG_SS)
4813 pHid->Attr.u |= IEM_GET_CPL(pVCpu) << X86DESCATTR_DPL_SHIFT;
4814
4815 /* This will affect the FLAT 32-bit mode flag: */
4816 if ( iSegReg < X86_SREG_FS
4817 && IEM_IS_32BIT_CODE(pVCpu))
4818 pVCpu->iem.s.fExec &= ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK;
4819 }
4820 /*
4821 * Protected / long mode.
4822 */
4823 else
4824 {
4825 /* Fetch the descriptor. */
4826 IEMSELDESC Desc;
4827 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP); /** @todo Correct exception? */
4828 if (rcStrict != VINF_SUCCESS)
4829 return rcStrict;
4830
4831 /* Check GPs first. */
4832 if (!Desc.Legacy.Gen.u1DescType)
4833 {
4834 Log(("load sreg %d (=%#x) - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
4835 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4836 }
4837 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
4838 {
4839 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4840 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
4841 {
4842 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
4843 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4844 }
4845 if ((uSel & X86_SEL_RPL) != IEM_GET_CPL(pVCpu))
4846 {
4847 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, IEM_GET_CPL(pVCpu)));
4848 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4849 }
4850 if (Desc.Legacy.Gen.u2Dpl != IEM_GET_CPL(pVCpu))
4851 {
4852 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
4853 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4854 }
4855 }
4856 else
4857 {
4858 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4859 {
4860 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
4861 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4862 }
4863 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4864 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4865 {
4866#if 0 /* this is what intel says. */
4867 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
4868 && IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl)
4869 {
4870 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
4871 iSegReg, uSel, (uSel & X86_SEL_RPL), IEM_GET_CPL(pVCpu), Desc.Legacy.Gen.u2Dpl));
4872 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4873 }
4874#else /* this is what makes more sense. */
4875 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4876 {
4877 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
4878 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
4879 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4880 }
4881 if (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl)
4882 {
4883 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
4884 iSegReg, uSel, IEM_GET_CPL(pVCpu), Desc.Legacy.Gen.u2Dpl));
4885 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4886 }
4887#endif
4888 }
4889 }
4890
4891 /* Is it there? */
4892 if (!Desc.Legacy.Gen.u1Present)
4893 {
4894 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
4895 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
4896 }
4897
4898 /* The base and limit. */
4899 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
4900 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
4901
4902 /*
4903 * Ok, everything checked out fine. Now set the accessed bit before
4904 * committing the result into the registers.
4905 */
4906 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4907 {
4908 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
4909 if (rcStrict != VINF_SUCCESS)
4910 return rcStrict;
4911 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4912 }
4913
4914 /* commit */
4915 *pSel = uSel;
4916 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4917 pHid->u32Limit = cbLimit;
4918 pHid->u64Base = u64Base;
4919 pHid->ValidSel = uSel;
4920 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4921
4922 /** @todo check if the hidden bits are loaded correctly for 64-bit
4923 * mode. */
4924
4925 /* This will affect the FLAT 32-bit mode flag: */
4926 if ( iSegReg < X86_SREG_FS
4927 && IEM_IS_32BIT_CODE(pVCpu))
4928 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK)
4929 | iemCalc32BitFlatIndicator(pVCpu);
4930 }
4931
4932 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pHid));
4933 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4934 return VINF_SUCCESS;
4935}
4936
4937
4938/**
4939 * Implements 'mov SReg, r/m'.
4940 *
4941 * @param iSegReg The segment register number (valid).
4942 * @param uSel The new selector value.
4943 */
4944IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
4945{
4946 VBOXSTRICTRC rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
4947 if (rcStrict == VINF_SUCCESS)
4948 rcStrict = iemCImpl_LoadSRegFinish(pVCpu, cbInstr, iSegReg);
4949 return rcStrict;
4950}
4951
4952
4953/**
4954 * Implements 'pop SReg'.
4955 *
4956 * @param iSegReg The segment register number (valid).
4957 * @param enmEffOpSize The efficient operand size (valid).
4958 */
4959IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
4960{
4961 VBOXSTRICTRC rcStrict;
4962
4963 /*
4964 * Read the selector off the stack and join paths with mov ss, reg.
4965 */
4966 RTUINT64U TmpRsp;
4967 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
4968 switch (enmEffOpSize)
4969 {
4970 case IEMMODE_16BIT:
4971 {
4972 uint16_t uSel;
4973 rcStrict = iemMemStackPopU16Ex(pVCpu, &uSel, &TmpRsp);
4974 if (rcStrict == VINF_SUCCESS)
4975 rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
4976 break;
4977 }
4978
4979 case IEMMODE_32BIT:
4980 {
4981 uint32_t u32Value;
4982 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Value, &TmpRsp);
4983 if (rcStrict == VINF_SUCCESS)
4984 rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, (uint16_t)u32Value);
4985 break;
4986 }
4987
4988 case IEMMODE_64BIT:
4989 {
4990 uint64_t u64Value;
4991 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Value, &TmpRsp);
4992 if (rcStrict == VINF_SUCCESS)
4993 rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, (uint16_t)u64Value);
4994 break;
4995 }
4996 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4997 }
4998
4999 /*
5000 * If the load succeeded, commit the stack change and finish the instruction.
5001 */
5002 if (rcStrict == VINF_SUCCESS)
5003 {
5004 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
5005 rcStrict = iemCImpl_LoadSRegFinish(pVCpu, cbInstr, iSegReg);
5006 }
5007
5008 return rcStrict;
5009}
5010
5011
5012/**
5013 * Implements lgs, lfs, les, lds & lss.
5014 */
5015IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg, uint16_t, uSel, uint64_t, offSeg, uint8_t, iSegReg, uint8_t, iGReg, IEMMODE, enmEffOpSize)
5016{
5017 /*
5018 * Use iemCImpl_LoadSRegWorker to do the tricky segment register loading.
5019 */
5020 /** @todo verify and test that mov, pop and lXs works the segment
5021 * register loading in the exact same way. */
5022 VBOXSTRICTRC rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
5023 if (rcStrict == VINF_SUCCESS)
5024 {
5025 switch (enmEffOpSize)
5026 {
5027 case IEMMODE_16BIT:
5028 iemGRegStoreU16(pVCpu, iGReg, offSeg);
5029 break;
5030 case IEMMODE_32BIT:
5031 case IEMMODE_64BIT:
5032 iemGRegStoreU64(pVCpu, iGReg, offSeg);
5033 break;
5034 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5035 }
5036 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5037 }
5038 return rcStrict;
5039}
5040
5041
5042/**
5043 * Helper for VERR, VERW, LAR, and LSL and loads the descriptor into memory.
5044 *
5045 * @retval VINF_SUCCESS on success.
5046 * @retval VINF_IEM_SELECTOR_NOT_OK if the selector isn't ok.
5047 * @retval iemMemFetchSysU64 return value.
5048 *
5049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5050 * @param uSel The selector value.
5051 * @param fAllowSysDesc Whether system descriptors are OK or not.
5052 * @param pDesc Where to return the descriptor on success.
5053 */
5054static VBOXSTRICTRC iemCImpl_LoadDescHelper(PVMCPUCC pVCpu, uint16_t uSel, bool fAllowSysDesc, PIEMSELDESC pDesc)
5055{
5056 pDesc->Long.au64[0] = 0;
5057 pDesc->Long.au64[1] = 0;
5058
5059 if (!(uSel & X86_SEL_MASK_OFF_RPL)) /** @todo test this on 64-bit. */
5060 return VINF_IEM_SELECTOR_NOT_OK;
5061
5062 /* Within the table limits? */
5063 RTGCPTR GCPtrBase;
5064 if (uSel & X86_SEL_LDT)
5065 {
5066 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
5067 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
5068 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
5069 return VINF_IEM_SELECTOR_NOT_OK;
5070 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
5071 }
5072 else
5073 {
5074 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR);
5075 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
5076 return VINF_IEM_SELECTOR_NOT_OK;
5077 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
5078 }
5079
5080 /* Fetch the descriptor. */
5081 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
5082 if (rcStrict != VINF_SUCCESS)
5083 return rcStrict;
5084 if (!pDesc->Legacy.Gen.u1DescType)
5085 {
5086 if (!fAllowSysDesc)
5087 return VINF_IEM_SELECTOR_NOT_OK;
5088 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
5089 {
5090 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
5091 if (rcStrict != VINF_SUCCESS)
5092 return rcStrict;
5093 }
5094
5095 }
5096
5097 return VINF_SUCCESS;
5098}
5099
5100
5101/**
5102 * Implements verr (fWrite = false) and verw (fWrite = true).
5103 */
5104IEM_CIMPL_DEF_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite)
5105{
5106 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu));
5107
5108 /** @todo figure whether the accessed bit is set or not. */
5109
5110 bool fAccessible = true;
5111 IEMSELDESC Desc;
5112 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, false /*fAllowSysDesc*/, &Desc);
5113 if (rcStrict == VINF_SUCCESS)
5114 {
5115 /* Check the descriptor, order doesn't matter much here. */
5116 if ( !Desc.Legacy.Gen.u1DescType
5117 || !Desc.Legacy.Gen.u1Present)
5118 fAccessible = false;
5119 else
5120 {
5121 if ( fWrite
5122 ? (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE
5123 : (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
5124 fAccessible = false;
5125
5126 /** @todo testcase for the conforming behavior. */
5127 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
5128 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
5129 {
5130 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
5131 fAccessible = false;
5132 else if (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl)
5133 fAccessible = false;
5134 }
5135 }
5136
5137 }
5138 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
5139 fAccessible = false;
5140 else
5141 return rcStrict;
5142
5143 /* commit */
5144 pVCpu->cpum.GstCtx.eflags.Bits.u1ZF = fAccessible;
5145
5146 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5147}
5148
5149
5150/**
5151 * Implements LAR and LSL with 64-bit operand size.
5152 *
5153 * @returns VINF_SUCCESS.
5154 * @param pu64Dst Pointer to the destination register.
5155 * @param uSel The selector to load details for.
5156 * @param fIsLar true = LAR, false = LSL.
5157 */
5158IEM_CIMPL_DEF_3(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, bool, fIsLar)
5159{
5160 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu));
5161
5162 /** @todo figure whether the accessed bit is set or not. */
5163
5164 bool fDescOk = true;
5165 IEMSELDESC Desc;
5166 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, true /*fAllowSysDesc*/, &Desc);
5167 if (rcStrict == VINF_SUCCESS)
5168 {
5169 /*
5170 * Check the descriptor type.
5171 */
5172 if (!Desc.Legacy.Gen.u1DescType)
5173 {
5174 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
5175 {
5176 if (Desc.Long.Gen.u5Zeros)
5177 fDescOk = false;
5178 else
5179 switch (Desc.Long.Gen.u4Type)
5180 {
5181 /** @todo Intel lists 0 as valid for LSL, verify whether that's correct */
5182 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
5183 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
5184 case AMD64_SEL_TYPE_SYS_LDT: /** @todo Intel lists this as invalid for LAR, AMD and 32-bit does otherwise. */
5185 break;
5186 case AMD64_SEL_TYPE_SYS_CALL_GATE:
5187 fDescOk = fIsLar;
5188 break;
5189 default:
5190 fDescOk = false;
5191 break;
5192 }
5193 }
5194 else
5195 {
5196 switch (Desc.Long.Gen.u4Type)
5197 {
5198 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
5199 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
5200 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
5201 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
5202 case X86_SEL_TYPE_SYS_LDT:
5203 break;
5204 case X86_SEL_TYPE_SYS_286_CALL_GATE:
5205 case X86_SEL_TYPE_SYS_TASK_GATE:
5206 case X86_SEL_TYPE_SYS_386_CALL_GATE:
5207 fDescOk = fIsLar;
5208 break;
5209 default:
5210 fDescOk = false;
5211 break;
5212 }
5213 }
5214 }
5215 if (fDescOk)
5216 {
5217 /*
5218 * Check the RPL/DPL/CPL interaction..
5219 */
5220 /** @todo testcase for the conforming behavior. */
5221 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
5222 || !Desc.Legacy.Gen.u1DescType)
5223 {
5224 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
5225 fDescOk = false;
5226 else if (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl)
5227 fDescOk = false;
5228 }
5229 }
5230
5231 if (fDescOk)
5232 {
5233 /*
5234 * All fine, start committing the result.
5235 */
5236 if (fIsLar)
5237 *pu64Dst = Desc.Legacy.au32[1] & UINT32_C(0x00ffff00);
5238 else
5239 *pu64Dst = X86DESC_LIMIT_G(&Desc.Legacy);
5240 }
5241
5242 }
5243 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
5244 fDescOk = false;
5245 else
5246 return rcStrict;
5247
5248 /* commit flags value and advance rip. */
5249 pVCpu->cpum.GstCtx.eflags.Bits.u1ZF = fDescOk;
5250 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5251}
5252
5253
5254/**
5255 * Implements LAR and LSL with 16-bit operand size.
5256 *
5257 * @returns VINF_SUCCESS.
5258 * @param pu16Dst Pointer to the destination register.
5259 * @param uSel The selector to load details for.
5260 * @param fIsLar true = LAR, false = LSL.
5261 */
5262IEM_CIMPL_DEF_3(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, bool, fIsLar)
5263{
5264 uint64_t u64TmpDst = *pu16Dst;
5265 IEM_CIMPL_CALL_3(iemCImpl_LarLsl_u64, &u64TmpDst, uSel, fIsLar);
5266 *pu16Dst = u64TmpDst;
5267 return VINF_SUCCESS;
5268}
5269
5270
5271/**
5272 * Implements lgdt.
5273 *
5274 * @param iEffSeg The segment of the new gdtr contents
5275 * @param GCPtrEffSrc The address of the new gdtr contents.
5276 * @param enmEffOpSize The effective operand size.
5277 */
5278IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
5279{
5280 if (IEM_GET_CPL(pVCpu) != 0)
5281 return iemRaiseGeneralProtectionFault0(pVCpu);
5282 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
5283
5284 if (!IEM_IS_IN_GUEST(pVCpu))
5285 { /* probable */ }
5286 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5287 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5288 {
5289 Log(("lgdt: Guest intercept -> VM-exit\n"));
5290 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_GDTR_IDTR_ACCESS, VMXINSTRID_LGDT, cbInstr);
5291 }
5292 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_WRITES))
5293 {
5294 Log(("lgdt: Guest intercept -> #VMEXIT\n"));
5295 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5296 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_GDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5297 }
5298
5299 /*
5300 * Fetch the limit and base address.
5301 */
5302 uint16_t cbLimit;
5303 RTGCPTR GCPtrBase;
5304 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
5305 if (rcStrict == VINF_SUCCESS)
5306 {
5307 if ( !IEM_IS_64BIT_CODE(pVCpu)
5308 || X86_IS_CANONICAL(GCPtrBase))
5309 {
5310 rcStrict = CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit);
5311 if (rcStrict == VINF_SUCCESS)
5312 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5313 }
5314 else
5315 {
5316 Log(("iemCImpl_lgdt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
5317 return iemRaiseGeneralProtectionFault0(pVCpu);
5318 }
5319 }
5320 return rcStrict;
5321}
5322
5323
5324/**
5325 * Implements sgdt.
5326 *
5327 * @param iEffSeg The segment where to store the gdtr content.
5328 * @param GCPtrEffDst The address where to store the gdtr content.
5329 */
5330IEM_CIMPL_DEF_2(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5331{
5332 /*
5333 * Join paths with sidt.
5334 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
5335 * you really must know.
5336 */
5337 if (!IEM_IS_IN_GUEST(pVCpu))
5338 { /* probable */ }
5339 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5340 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5341 {
5342 Log(("sgdt: Guest intercept -> VM-exit\n"));
5343 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_GDTR_IDTR_ACCESS, VMXINSTRID_SGDT, cbInstr);
5344 }
5345 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_READS))
5346 {
5347 Log(("sgdt: Guest intercept -> #VMEXIT\n"));
5348 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5349 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_GDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5350 }
5351
5352 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR);
5353 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.gdtr.cbGdt, pVCpu->cpum.GstCtx.gdtr.pGdt, iEffSeg, GCPtrEffDst);
5354 if (rcStrict == VINF_SUCCESS)
5355 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5356 return rcStrict;
5357}
5358
5359
5360/**
5361 * Implements lidt.
5362 *
5363 * @param iEffSeg The segment of the new idtr contents
5364 * @param GCPtrEffSrc The address of the new idtr contents.
5365 * @param enmEffOpSize The effective operand size.
5366 */
5367IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
5368{
5369 if (IEM_GET_CPL(pVCpu) != 0)
5370 return iemRaiseGeneralProtectionFault0(pVCpu);
5371 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
5372
5373 if (!IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_WRITES))
5374 { /* probable */ }
5375 else
5376 {
5377 Log(("lidt: Guest intercept -> #VMEXIT\n"));
5378 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5379 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5380 }
5381
5382 /*
5383 * Fetch the limit and base address.
5384 */
5385 uint16_t cbLimit;
5386 RTGCPTR GCPtrBase;
5387 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
5388 if (rcStrict == VINF_SUCCESS)
5389 {
5390 if ( !IEM_IS_64BIT_CODE(pVCpu)
5391 || X86_IS_CANONICAL(GCPtrBase))
5392 {
5393 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit);
5394 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5395 }
5396 else
5397 {
5398 Log(("iemCImpl_lidt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
5399 return iemRaiseGeneralProtectionFault0(pVCpu);
5400 }
5401 }
5402 return rcStrict;
5403}
5404
5405
5406/**
5407 * Implements sidt.
5408 *
5409 * @param iEffSeg The segment where to store the idtr content.
5410 * @param GCPtrEffDst The address where to store the idtr content.
5411 */
5412IEM_CIMPL_DEF_2(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5413{
5414 /*
5415 * Join paths with sgdt.
5416 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
5417 * you really must know.
5418 */
5419 if (!IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
5420 { /* probable */ }
5421 else
5422 {
5423 Log(("sidt: Guest intercept -> #VMEXIT\n"));
5424 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5425 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5426 }
5427
5428 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_IDTR);
5429 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.idtr.cbIdt, pVCpu->cpum.GstCtx.idtr.pIdt, iEffSeg, GCPtrEffDst);
5430 if (rcStrict == VINF_SUCCESS)
5431 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5432 return rcStrict;
5433}
5434
5435
5436/**
5437 * Implements lldt.
5438 *
5439 * @param uNewLdt The new LDT selector value.
5440 */
5441IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
5442{
5443 /*
5444 * Check preconditions.
5445 */
5446 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5447 {
5448 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
5449 return iemRaiseUndefinedOpcode(pVCpu);
5450 }
5451 if (IEM_GET_CPL(pVCpu) != 0)
5452 {
5453 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, IEM_GET_CPL(pVCpu)));
5454 return iemRaiseGeneralProtectionFault0(pVCpu);
5455 }
5456
5457 /* Nested-guest VMX intercept (SVM is after all checks). */
5458 /** @todo testcase: exit vs check order. */
5459 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5460 || !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5461 { /* probable */ }
5462 else
5463 {
5464 Log(("lldt: Guest intercept -> VM-exit\n"));
5465 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_LLDT, cbInstr);
5466 }
5467
5468 if (uNewLdt & X86_SEL_LDT)
5469 {
5470 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
5471 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewLdt);
5472 }
5473
5474 /*
5475 * Now, loading a NULL selector is easy.
5476 */
5477 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
5478 {
5479 /* Nested-guest SVM intercept. */
5480 if (!IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
5481 { /* probable */ }
5482 else
5483 {
5484 Log(("lldt: Guest intercept -> #VMEXIT\n"));
5485 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5486 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5487 }
5488
5489 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
5490 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_LDTR;
5491 CPUMSetGuestLDTR(pVCpu, uNewLdt);
5492 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
5493 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
5494 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
5495 {
5496 /* AMD-V seems to leave the base and limit alone. */
5497 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE;
5498 }
5499 else
5500 {
5501 /* VT-x (Intel 3960x) seems to be doing the following. */
5502 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D;
5503 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
5504 pVCpu->cpum.GstCtx.ldtr.u32Limit = UINT32_MAX;
5505 }
5506
5507 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5508 }
5509
5510 /*
5511 * Read the descriptor.
5512 */
5513 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR);
5514 IEMSELDESC Desc;
5515 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewLdt, X86_XCPT_GP); /** @todo Correct exception? */
5516 if (rcStrict != VINF_SUCCESS)
5517 return rcStrict;
5518
5519 /* Check GPs first. */
5520 if (Desc.Legacy.Gen.u1DescType)
5521 {
5522 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
5523 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5524 }
5525 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
5526 {
5527 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
5528 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5529 }
5530 uint64_t u64Base;
5531 if (!IEM_IS_LONG_MODE(pVCpu))
5532 u64Base = X86DESC_BASE(&Desc.Legacy);
5533 else
5534 {
5535 if (Desc.Long.Gen.u5Zeros)
5536 {
5537 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
5538 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5539 }
5540
5541 u64Base = X86DESC64_BASE(&Desc.Long);
5542 if (!IEM_IS_CANONICAL(u64Base))
5543 {
5544 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
5545 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5546 }
5547 }
5548
5549 /* NP */
5550 if (!Desc.Legacy.Gen.u1Present)
5551 {
5552 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
5553 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewLdt);
5554 }
5555
5556 /* Nested-guest SVM intercept. */
5557 if (!IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
5558 { /* probable */ }
5559 else
5560 {
5561 Log(("lldt: Guest intercept -> #VMEXIT\n"));
5562 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5563 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5564 }
5565
5566 /*
5567 * It checks out alright, update the registers.
5568 */
5569/** @todo check if the actual value is loaded or if the RPL is dropped */
5570 CPUMSetGuestLDTR(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5571 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
5572 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
5573 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
5574 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
5575 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
5576
5577 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5578}
5579
5580
5581/**
5582 * Implements sldt GReg
5583 *
5584 * @param iGReg The general register to store the CRx value in.
5585 * @param enmEffOpSize The operand size.
5586 */
5587IEM_CIMPL_DEF_2(iemCImpl_sldt_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
5588{
5589 if (!IEM_IS_IN_GUEST(pVCpu))
5590 { /* probable */ }
5591 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5592 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5593 {
5594 Log(("sldt: Guest intercept -> VM-exit\n"));
5595 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_SLDT, cbInstr);
5596 }
5597 else
5598 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0, cbInstr);
5599
5600 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
5601 switch (enmEffOpSize)
5602 {
5603 case IEMMODE_16BIT:
5604 iemGRegStoreU16(pVCpu, iGReg, pVCpu->cpum.GstCtx.ldtr.Sel);
5605 break;
5606 case IEMMODE_32BIT:
5607 case IEMMODE_64BIT:
5608 iemGRegStoreU64(pVCpu, iGReg, pVCpu->cpum.GstCtx.ldtr.Sel);
5609 break;
5610 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5611 }
5612 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5613}
5614
5615
5616/**
5617 * Implements sldt mem.
5618 *
5619 * @param iEffSeg The effective segment register to use with @a GCPtrMem.
5620 * @param GCPtrEffDst Where to store the 16-bit CR0 value.
5621 */
5622IEM_CIMPL_DEF_2(iemCImpl_sldt_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5623{
5624 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0, cbInstr);
5625
5626 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
5627 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, pVCpu->cpum.GstCtx.ldtr.Sel);
5628 if (rcStrict == VINF_SUCCESS)
5629 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5630 return rcStrict;
5631}
5632
5633
5634/**
5635 * Implements ltr.
5636 *
5637 * @param uNewTr The new TSS selector value.
5638 */
5639IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
5640{
5641 /*
5642 * Check preconditions.
5643 */
5644 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5645 {
5646 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
5647 return iemRaiseUndefinedOpcode(pVCpu);
5648 }
5649 if (IEM_GET_CPL(pVCpu) != 0)
5650 {
5651 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, IEM_GET_CPL(pVCpu)));
5652 return iemRaiseGeneralProtectionFault0(pVCpu);
5653 }
5654 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5655 || !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5656 { /* probable */ }
5657 else
5658 {
5659 Log(("ltr: Guest intercept -> VM-exit\n"));
5660 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_LTR, cbInstr);
5661 }
5662 if (uNewTr & X86_SEL_LDT)
5663 {
5664 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
5665 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewTr);
5666 }
5667 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
5668 {
5669 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
5670 return iemRaiseGeneralProtectionFault0(pVCpu);
5671 }
5672 if (!IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TR_WRITES))
5673 { /* probable */ }
5674 else
5675 {
5676 Log(("ltr: Guest intercept -> #VMEXIT\n"));
5677 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5678 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5679 }
5680
5681 /*
5682 * Read the descriptor.
5683 */
5684 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_TR);
5685 IEMSELDESC Desc;
5686 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewTr, X86_XCPT_GP); /** @todo Correct exception? */
5687 if (rcStrict != VINF_SUCCESS)
5688 return rcStrict;
5689
5690 /* Check GPs first. */
5691 if (Desc.Legacy.Gen.u1DescType)
5692 {
5693 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
5694 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5695 }
5696 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
5697 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
5698 || IEM_IS_LONG_MODE(pVCpu)) )
5699 {
5700 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
5701 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5702 }
5703 uint64_t u64Base;
5704 if (!IEM_IS_LONG_MODE(pVCpu))
5705 u64Base = X86DESC_BASE(&Desc.Legacy);
5706 else
5707 {
5708 if (Desc.Long.Gen.u5Zeros)
5709 {
5710 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
5711 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5712 }
5713
5714 u64Base = X86DESC64_BASE(&Desc.Long);
5715 if (!IEM_IS_CANONICAL(u64Base))
5716 {
5717 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
5718 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5719 }
5720 }
5721
5722 /* NP */
5723 if (!Desc.Legacy.Gen.u1Present)
5724 {
5725 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
5726 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewTr);
5727 }
5728
5729 /*
5730 * Set it busy.
5731 * Note! Intel says this should lock down the whole descriptor, but we'll
5732 * restrict our selves to 32-bit for now due to lack of inline
5733 * assembly and such.
5734 */
5735 void *pvDesc;
5736 rcStrict = iemMemMap(pVCpu, &pvDesc, 8, UINT8_MAX, pVCpu->cpum.GstCtx.gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL),
5737 IEM_ACCESS_DATA_RW, 0);
5738 if (rcStrict != VINF_SUCCESS)
5739 return rcStrict;
5740 switch ((uintptr_t)pvDesc & 3)
5741 {
5742 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
5743 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
5744 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
5745 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
5746 }
5747 rcStrict = iemMemCommitAndUnmap(pVCpu, pvDesc, IEM_ACCESS_DATA_RW);
5748 if (rcStrict != VINF_SUCCESS)
5749 return rcStrict;
5750 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
5751
5752 /*
5753 * It checks out alright, update the registers.
5754 */
5755/** @todo check if the actual value is loaded or if the RPL is dropped */
5756 CPUMSetGuestTR(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5757 pVCpu->cpum.GstCtx.tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
5758 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
5759 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
5760 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
5761 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
5762
5763 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5764}
5765
5766
5767/**
5768 * Implements str GReg
5769 *
5770 * @param iGReg The general register to store the CRx value in.
5771 * @param enmEffOpSize The operand size.
5772 */
5773IEM_CIMPL_DEF_2(iemCImpl_str_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
5774{
5775 if (!IEM_IS_IN_GUEST(pVCpu))
5776 { /* probable */ }
5777 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5778 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5779 {
5780 Log(("str_reg: Guest intercept -> VM-exit\n"));
5781 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_STR, cbInstr);
5782 }
5783 else
5784 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0, cbInstr);
5785
5786 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
5787 switch (enmEffOpSize)
5788 {
5789 case IEMMODE_16BIT:
5790 iemGRegStoreU16(pVCpu, iGReg, pVCpu->cpum.GstCtx.tr.Sel);
5791 break;
5792 case IEMMODE_32BIT:
5793 case IEMMODE_64BIT:
5794 iemGRegStoreU64(pVCpu, iGReg, pVCpu->cpum.GstCtx.tr.Sel);
5795 break;
5796 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5797 }
5798 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5799}
5800
5801
5802/**
5803 * Implements str mem.
5804 *
5805 * @param iEffSeg The effective segment register to use with @a GCPtrMem.
5806 * @param GCPtrEffDst Where to store the 16-bit CR0 value.
5807 */
5808IEM_CIMPL_DEF_2(iemCImpl_str_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5809{
5810 if (!IEM_IS_IN_GUEST(pVCpu))
5811 { /* probable */ }
5812 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5813 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5814 {
5815 Log(("str_mem: Guest intercept -> VM-exit\n"));
5816 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_STR, cbInstr);
5817 }
5818 else
5819 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0, cbInstr);
5820
5821 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
5822 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, pVCpu->cpum.GstCtx.tr.Sel);
5823 if (rcStrict == VINF_SUCCESS)
5824 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5825 return rcStrict;
5826}
5827
5828
5829/**
5830 * Implements mov GReg,CRx.
5831 *
5832 * @param iGReg The general register to store the CRx value in.
5833 * @param iCrReg The CRx register to read (valid).
5834 */
5835IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
5836{
5837 if (IEM_GET_CPL(pVCpu) != 0)
5838 return iemRaiseGeneralProtectionFault0(pVCpu);
5839 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
5840
5841 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(pVCpu, iCrReg))
5842 { /* probable */ }
5843 else
5844 {
5845 Log(("iemCImpl_mov_Rd_Cd: Guest intercept CR%u -> #VMEXIT\n", iCrReg));
5846 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5847 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_READ_CR0 + iCrReg, IEMACCESSCRX_MOV_CRX, iGReg);
5848 }
5849
5850 /* Read it. */
5851 uint64_t crX;
5852 switch (iCrReg)
5853 {
5854 case 0:
5855 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
5856 crX = pVCpu->cpum.GstCtx.cr0;
5857 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
5858 crX |= UINT32_C(0x7fffffe0); /* All reserved CR0 flags are set on a 386, just like MSW on 286. */
5859 break;
5860 case 2:
5861 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR2);
5862 crX = pVCpu->cpum.GstCtx.cr2;
5863 break;
5864 case 3:
5865 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
5866 crX = pVCpu->cpum.GstCtx.cr3;
5867 break;
5868 case 4:
5869 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5870 crX = pVCpu->cpum.GstCtx.cr4;
5871 break;
5872 case 8:
5873 {
5874 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
5875 if (!IEM_IS_IN_GUEST(pVCpu))
5876 { /* probable */ }
5877#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5878 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5879 {
5880 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovFromCr8(pVCpu, iGReg, cbInstr);
5881 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5882 return rcStrict;
5883
5884 /*
5885 * If the Mov-from-CR8 doesn't cause a VM-exit, bits 7:4 of the VTPR is copied
5886 * to bits 0:3 of the destination operand. Bits 63:4 of the destination operand
5887 * are cleared.
5888 *
5889 * See Intel Spec. 29.3 "Virtualizing CR8-based TPR Accesses"
5890 */
5891 if (IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_USE_TPR_SHADOW))
5892 {
5893 uint32_t const uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
5894 crX = (uTpr >> 4) & 0xf;
5895 break;
5896 }
5897 }
5898#endif
5899#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5900 else if (pVCpu->iem.s.fExec & IEM_F_X86_CTX_SVM)
5901 {
5902 PCSVMVMCBCTRL pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl;
5903 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, IEM_GET_CTX(pVCpu)))
5904 {
5905 crX = pVmcbCtrl->IntCtrl.n.u8VTPR & 0xf;
5906 break;
5907 }
5908 }
5909#endif
5910 uint8_t uTpr;
5911 int rc = APICGetTpr(pVCpu, &uTpr, NULL, NULL);
5912 if (RT_SUCCESS(rc))
5913 crX = uTpr >> 4;
5914 else
5915 crX = 0;
5916 break;
5917 }
5918 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5919 }
5920
5921#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5922 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5923 { /* probable */ }
5924 else
5925 switch (iCrReg)
5926 {
5927 /* CR0/CR4 reads are subject to masking when in VMX non-root mode. */
5928 case 0: crX = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u); break;
5929 case 4: crX = CPUMGetGuestVmxMaskedCr4(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr4Mask.u); break;
5930 case 3:
5931 {
5932 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovFromCr3(pVCpu, iGReg, cbInstr);
5933 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5934 return rcStrict;
5935 break;
5936 }
5937 }
5938#endif
5939
5940 /* Store it. */
5941 if (IEM_IS_64BIT_CODE(pVCpu))
5942 iemGRegStoreU64(pVCpu, iGReg, crX);
5943 else
5944 iemGRegStoreU64(pVCpu, iGReg, (uint32_t)crX);
5945
5946 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5947}
5948
5949
5950/**
5951 * Implements smsw GReg.
5952 *
5953 * @param iGReg The general register to store the CRx value in.
5954 * @param enmEffOpSize The operand size.
5955 */
5956IEM_CIMPL_DEF_2(iemCImpl_smsw_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
5957{
5958 IEM_SVM_CHECK_READ_CR0_INTERCEPT(pVCpu, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */, cbInstr);
5959
5960#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5961 uint64_t u64MaskedCr0;
5962 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5963 u64MaskedCr0 = pVCpu->cpum.GstCtx.cr0;
5964 else
5965 u64MaskedCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u);
5966 uint64_t const u64GuestCr0 = u64MaskedCr0;
5967#else
5968 uint64_t const u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
5969#endif
5970
5971 switch (enmEffOpSize)
5972 {
5973 case IEMMODE_16BIT:
5974 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
5975 iemGRegStoreU16(pVCpu, iGReg, (uint16_t)u64GuestCr0);
5976 /* Unused bits are set on 386 and older CPU: */
5977 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
5978 iemGRegStoreU16(pVCpu, iGReg, (uint16_t)u64GuestCr0 | 0xffe0);
5979 else
5980 iemGRegStoreU16(pVCpu, iGReg, (uint16_t)u64GuestCr0 | 0xfff0);
5981 break;
5982
5983/** @todo testcase for bits 31:16. We're not doing that correctly. */
5984
5985 case IEMMODE_32BIT:
5986 if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
5987 iemGRegStoreU32(pVCpu, iGReg, (uint32_t)u64GuestCr0);
5988 else /** @todo test this! */
5989 iemGRegStoreU32(pVCpu, iGReg, (uint32_t)u64GuestCr0 | UINT32_C(0x7fffffe0)); /* Unused bits are set on 386. */
5990 break;
5991
5992 case IEMMODE_64BIT:
5993 iemGRegStoreU64(pVCpu, iGReg, u64GuestCr0);
5994 break;
5995
5996 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5997 }
5998
5999 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6000}
6001
6002
6003/**
6004 * Implements smsw mem.
6005 *
6006 * @param iEffSeg The effective segment register to use with @a GCPtrMem.
6007 * @param GCPtrEffDst Where to store the 16-bit CR0 value.
6008 */
6009IEM_CIMPL_DEF_2(iemCImpl_smsw_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
6010{
6011 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
6012 if (!IEM_IS_IN_GUEST(pVCpu))
6013 { /* probable */ }
6014 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6015 u64GuestCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u);
6016 else
6017 IEM_SVM_CHECK_READ_CR0_INTERCEPT(pVCpu, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */, cbInstr);
6018
6019 uint16_t u16Value;
6020 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
6021 u16Value = (uint16_t)u64GuestCr0;
6022 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
6023 u16Value = (uint16_t)u64GuestCr0 | 0xffe0;
6024 else
6025 u16Value = (uint16_t)u64GuestCr0 | 0xfff0;
6026
6027 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, u16Value);
6028 if (rcStrict == VINF_SUCCESS)
6029 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6030 return rcStrict;
6031}
6032
6033
6034/**
6035 * Helper for mapping CR3 and PAE PDPEs for 'mov CRx,GReg'.
6036 */
6037#define IEM_MAP_PAE_PDPES_AT_CR3_RET(a_pVCpu, a_iCrReg, a_uCr3) \
6038 do \
6039 { \
6040 int const rcX = PGMGstMapPaePdpesAtCr3(a_pVCpu, a_uCr3); \
6041 if (RT_SUCCESS(rcX)) \
6042 { /* likely */ } \
6043 else \
6044 { \
6045 /* Either invalid PDPTEs or CR3 second-level translation failed. Raise #GP(0) either way. */ \
6046 Log(("iemCImpl_load_Cr%#x: Trying to load invalid PAE PDPEs\n", a_iCrReg)); \
6047 return iemRaiseGeneralProtectionFault0(a_pVCpu); \
6048 } \
6049 } while (0)
6050
6051
6052/**
6053 * Used to implemented 'mov CRx,GReg' and 'lmsw r/m16'.
6054 *
6055 * @param iCrReg The CRx register to write (valid).
6056 * @param uNewCrX The new value.
6057 * @param enmAccessCrX The instruction that caused the CrX load.
6058 * @param iGReg The general register in case of a 'mov CRx,GReg'
6059 * instruction.
6060 */
6061IEM_CIMPL_DEF_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg)
6062{
6063 VBOXSTRICTRC rcStrict;
6064 int rc;
6065#ifndef VBOX_WITH_NESTED_HWVIRT_SVM
6066 RT_NOREF2(iGReg, enmAccessCrX);
6067#endif
6068
6069 /*
6070 * Try store it.
6071 * Unfortunately, CPUM only does a tiny bit of the work.
6072 */
6073 switch (iCrReg)
6074 {
6075 case 0:
6076 {
6077 /*
6078 * Perform checks.
6079 */
6080 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
6081
6082 uint64_t const uOldCrX = pVCpu->cpum.GstCtx.cr0;
6083 uint32_t const fValid = CPUMGetGuestCR0ValidMask();
6084
6085 /* ET is hardcoded on 486 and later. */
6086 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_486)
6087 uNewCrX |= X86_CR0_ET;
6088 /* The 386 and 486 didn't #GP(0) on attempting to set reserved CR0 bits. ET was settable on 386. */
6089 else if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_486)
6090 {
6091 uNewCrX &= fValid;
6092 uNewCrX |= X86_CR0_ET;
6093 }
6094 else
6095 uNewCrX &= X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG | X86_CR0_ET;
6096
6097 /* Check for reserved bits. */
6098 if (uNewCrX & ~(uint64_t)fValid)
6099 {
6100 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
6101 return iemRaiseGeneralProtectionFault0(pVCpu);
6102 }
6103
6104 /* Check for invalid combinations. */
6105 if ( (uNewCrX & X86_CR0_PG)
6106 && !(uNewCrX & X86_CR0_PE) )
6107 {
6108 Log(("Trying to set CR0.PG without CR0.PE\n"));
6109 return iemRaiseGeneralProtectionFault0(pVCpu);
6110 }
6111
6112 if ( !(uNewCrX & X86_CR0_CD)
6113 && (uNewCrX & X86_CR0_NW) )
6114 {
6115 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
6116 return iemRaiseGeneralProtectionFault0(pVCpu);
6117 }
6118
6119 if ( !(uNewCrX & X86_CR0_PG)
6120 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCIDE))
6121 {
6122 Log(("Trying to clear CR0.PG while leaving CR4.PCID set\n"));
6123 return iemRaiseGeneralProtectionFault0(pVCpu);
6124 }
6125
6126 /* Long mode consistency checks. */
6127 if ( (uNewCrX & X86_CR0_PG)
6128 && !(uOldCrX & X86_CR0_PG)
6129 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME) )
6130 {
6131 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE))
6132 {
6133 Log(("Trying to enabled long mode paging without CR4.PAE set\n"));
6134 return iemRaiseGeneralProtectionFault0(pVCpu);
6135 }
6136 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1Long)
6137 {
6138 Log(("Trying to enabled long mode paging with a long CS descriptor loaded.\n"));
6139 return iemRaiseGeneralProtectionFault0(pVCpu);
6140 }
6141 }
6142
6143 /** @todo testcase: what happens if we disable paging while in 64-bit code? */
6144
6145 if (!IEM_IS_IN_GUEST(pVCpu))
6146 { /* probable */ }
6147#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6148 /* Check for bits that must remain set or cleared in VMX operation,
6149 see Intel spec. 23.8 "Restrictions on VMX operation". */
6150 else if (IEM_VMX_IS_ROOT_MODE(pVCpu))
6151 {
6152 uint64_t const uCr0Fixed0 = iemVmxGetCr0Fixed0(pVCpu, IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
6153 if ((uNewCrX & uCr0Fixed0) != uCr0Fixed0)
6154 {
6155 Log(("Trying to clear reserved CR0 bits in VMX operation: NewCr0=%#llx MB1=%#llx\n", uNewCrX, uCr0Fixed0));
6156 return iemRaiseGeneralProtectionFault0(pVCpu);
6157 }
6158
6159 uint64_t const uCr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
6160 if (uNewCrX & ~uCr0Fixed1)
6161 {
6162 Log(("Trying to set reserved CR0 bits in VMX operation: NewCr0=%#llx MB0=%#llx\n", uNewCrX, uCr0Fixed1));
6163 return iemRaiseGeneralProtectionFault0(pVCpu);
6164 }
6165 }
6166#endif
6167 /*
6168 * SVM nested-guest CR0 write intercepts.
6169 */
6170 else if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, iCrReg))
6171 {
6172 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6173 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6174 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR0, enmAccessCrX, iGReg);
6175 }
6176 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
6177 {
6178 /* 'lmsw' intercepts regardless of whether the TS/MP bits are actually toggled. */
6179 if ( enmAccessCrX == IEMACCESSCRX_LMSW
6180 || (uNewCrX & ~(X86_CR0_TS | X86_CR0_MP)) != (uOldCrX & ~(X86_CR0_TS | X86_CR0_MP)))
6181 {
6182 Assert(enmAccessCrX != IEMACCESSCRX_CLTS);
6183 Log(("iemCImpl_load_Cr%#x: lmsw or bits other than TS/MP changed: Guest intercept -> #VMEXIT\n", iCrReg));
6184 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6185 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_CR0_SEL_WRITE, enmAccessCrX, iGReg);
6186 }
6187 }
6188
6189 /*
6190 * Change EFER.LMA if entering or leaving long mode.
6191 */
6192 uint64_t NewEFER = pVCpu->cpum.GstCtx.msrEFER;
6193 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
6194 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME) )
6195 {
6196 if (uNewCrX & X86_CR0_PG)
6197 NewEFER |= MSR_K6_EFER_LMA;
6198 else
6199 NewEFER &= ~MSR_K6_EFER_LMA;
6200
6201 CPUMSetGuestEFER(pVCpu, NewEFER);
6202 Assert(pVCpu->cpum.GstCtx.msrEFER == NewEFER);
6203 }
6204
6205 /*
6206 * Inform PGM.
6207 */
6208 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE | X86_CR0_CD | X86_CR0_NW))
6209 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE | X86_CR0_CD | X86_CR0_NW)) )
6210 {
6211 if ( enmAccessCrX != IEMACCESSCRX_MOV_CRX
6212 || !CPUMIsPaePagingEnabled(uNewCrX, pVCpu->cpum.GstCtx.cr4, NewEFER)
6213 || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
6214 { /* likely */ }
6215 else
6216 IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, pVCpu->cpum.GstCtx.cr3);
6217 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
6218 AssertRCReturn(rc, rc);
6219 /* ignore informational status codes */
6220 }
6221
6222 /*
6223 * Change CR0.
6224 */
6225 CPUMSetGuestCR0(pVCpu, uNewCrX);
6226 Assert(pVCpu->cpum.GstCtx.cr0 == uNewCrX);
6227
6228 /* Update the fExec flags if PE changed. */
6229 if ((uNewCrX ^ uOldCrX) & X86_CR0_PE)
6230 iemRecalcExecModeAndCplFlags(pVCpu);
6231
6232 /*
6233 * Inform PGM some more...
6234 */
6235 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
6236 false /* fForce */);
6237 break;
6238 }
6239
6240 /*
6241 * CR2 can be changed without any restrictions.
6242 */
6243 case 2:
6244 {
6245 if (!IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 2))
6246 { /* probable */ }
6247 else
6248 {
6249 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6250 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6251 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR2, enmAccessCrX, iGReg);
6252 }
6253 pVCpu->cpum.GstCtx.cr2 = uNewCrX;
6254 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_CR2;
6255 rcStrict = VINF_SUCCESS;
6256 break;
6257 }
6258
6259 /*
6260 * CR3 is relatively simple, although AMD and Intel have different
6261 * accounts of how setting reserved bits are handled. We take intel's
6262 * word for the lower bits and AMD's for the high bits (63:52). The
6263 * lower reserved bits are ignored and left alone; OpenBSD 5.8 relies
6264 * on this.
6265 */
6266 /** @todo Testcase: Setting reserved bits in CR3, especially before
6267 * enabling paging. */
6268 case 3:
6269 {
6270 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
6271
6272 /* Bit 63 being clear in the source operand with PCIDE indicates no invalidations are required. */
6273 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCIDE)
6274 && (uNewCrX & RT_BIT_64(63)))
6275 {
6276 /** @todo r=ramshankar: avoiding a TLB flush altogether here causes Windows 10
6277 * SMP(w/o nested-paging) to hang during bootup on Skylake systems, see
6278 * Intel spec. 4.10.4.1 "Operations that Invalidate TLBs and
6279 * Paging-Structure Caches". */
6280 uNewCrX &= ~RT_BIT_64(63);
6281 }
6282
6283 /* Check / mask the value. */
6284#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6285 /* See Intel spec. 27.2.2 "EPT Translation Mechanism" footnote. */
6286 uint64_t const fInvPhysMask = !CPUMIsGuestVmxEptPagingEnabledEx(IEM_GET_CTX(pVCpu))
6287 ? (UINT64_MAX << IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
6288 : (~X86_CR3_EPT_PAGE_MASK & X86_PAGE_4K_BASE_MASK);
6289#else
6290 uint64_t const fInvPhysMask = UINT64_C(0xfff0000000000000);
6291#endif
6292 if (uNewCrX & fInvPhysMask)
6293 {
6294 /** @todo Should we raise this only for 64-bit mode like Intel claims? AMD is
6295 * very vague in this area. As mentioned above, need testcase on real
6296 * hardware... Sigh. */
6297 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
6298 return iemRaiseGeneralProtectionFault0(pVCpu);
6299 }
6300
6301 uint64_t fValid;
6302 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
6303 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME))
6304 {
6305 /** @todo Redundant? This value has already been validated above. */
6306 fValid = UINT64_C(0x000fffffffffffff);
6307 }
6308 else
6309 fValid = UINT64_C(0xffffffff);
6310 if (uNewCrX & ~fValid)
6311 {
6312 Log(("Automatically clearing reserved MBZ bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
6313 uNewCrX, uNewCrX & ~fValid));
6314 uNewCrX &= fValid;
6315 }
6316
6317 if (!IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 3))
6318 { /* probable */ }
6319 else
6320 {
6321 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6322 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6323 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR3, enmAccessCrX, iGReg);
6324 }
6325
6326 /* Inform PGM. */
6327 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG)
6328 {
6329 if ( !CPUMIsGuestInPAEModeEx(IEM_GET_CTX(pVCpu))
6330 || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
6331 { /* likely */ }
6332 else
6333 {
6334 Assert(enmAccessCrX == IEMACCESSCRX_MOV_CRX);
6335 IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, uNewCrX);
6336 }
6337 rc = PGMFlushTLB(pVCpu, uNewCrX, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
6338 AssertRCReturn(rc, rc);
6339 /* ignore informational status codes */
6340 }
6341
6342 /* Make the change. */
6343 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
6344 AssertRCSuccessReturn(rc, rc);
6345
6346 rcStrict = VINF_SUCCESS;
6347 break;
6348 }
6349
6350 /*
6351 * CR4 is a bit more tedious as there are bits which cannot be cleared
6352 * under some circumstances and such.
6353 */
6354 case 4:
6355 {
6356 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6357 uint64_t const uOldCrX = pVCpu->cpum.GstCtx.cr4;
6358
6359 /* Reserved bits. */
6360 uint32_t const fValid = CPUMGetGuestCR4ValidMask(pVCpu->CTX_SUFF(pVM));
6361 if (uNewCrX & ~(uint64_t)fValid)
6362 {
6363 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
6364 return iemRaiseGeneralProtectionFault0(pVCpu);
6365 }
6366
6367 bool const fPcide = !(uOldCrX & X86_CR4_PCIDE) && (uNewCrX & X86_CR4_PCIDE);
6368 bool const fLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
6369
6370 /* PCIDE check. */
6371 if ( fPcide
6372 && ( !fLongMode
6373 || (pVCpu->cpum.GstCtx.cr3 & UINT64_C(0xfff))))
6374 {
6375 Log(("Trying to set PCIDE with invalid PCID or outside long mode. Pcid=%#x\n", (pVCpu->cpum.GstCtx.cr3 & UINT64_C(0xfff))));
6376 return iemRaiseGeneralProtectionFault0(pVCpu);
6377 }
6378
6379 /* PAE check. */
6380 if ( fLongMode
6381 && (uOldCrX & X86_CR4_PAE)
6382 && !(uNewCrX & X86_CR4_PAE))
6383 {
6384 Log(("Trying to set clear CR4.PAE while long mode is active\n"));
6385 return iemRaiseGeneralProtectionFault0(pVCpu);
6386 }
6387
6388 if (!IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 4))
6389 { /* probable */ }
6390 else
6391 {
6392 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6393 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6394 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR4, enmAccessCrX, iGReg);
6395 }
6396
6397 /* Check for bits that must remain set or cleared in VMX operation,
6398 see Intel spec. 23.8 "Restrictions on VMX operation". */
6399 if (!IEM_VMX_IS_ROOT_MODE(pVCpu))
6400 { /* probable */ }
6401 else
6402 {
6403 uint64_t const uCr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
6404 if ((uNewCrX & uCr4Fixed0) != uCr4Fixed0)
6405 {
6406 Log(("Trying to clear reserved CR4 bits in VMX operation: NewCr4=%#llx MB1=%#llx\n", uNewCrX, uCr4Fixed0));
6407 return iemRaiseGeneralProtectionFault0(pVCpu);
6408 }
6409
6410 uint64_t const uCr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
6411 if (uNewCrX & ~uCr4Fixed1)
6412 {
6413 Log(("Trying to set reserved CR4 bits in VMX operation: NewCr4=%#llx MB0=%#llx\n", uNewCrX, uCr4Fixed1));
6414 return iemRaiseGeneralProtectionFault0(pVCpu);
6415 }
6416 }
6417
6418 /*
6419 * Notify PGM.
6420 */
6421 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_PCIDE /* | X86_CR4_SMEP */))
6422 {
6423 if ( !CPUMIsPaePagingEnabled(pVCpu->cpum.GstCtx.cr0, uNewCrX, pVCpu->cpum.GstCtx.msrEFER)
6424 || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
6425 { /* likely */ }
6426 else
6427 {
6428 Assert(enmAccessCrX == IEMACCESSCRX_MOV_CRX);
6429 IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, pVCpu->cpum.GstCtx.cr3);
6430 }
6431 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
6432 AssertRCReturn(rc, rc);
6433 /* ignore informational status codes */
6434 }
6435
6436 /*
6437 * Change it.
6438 */
6439 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
6440 AssertRCSuccessReturn(rc, rc);
6441 Assert(pVCpu->cpum.GstCtx.cr4 == uNewCrX);
6442
6443 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
6444 false /* fForce */);
6445 break;
6446 }
6447
6448 /*
6449 * CR8 maps to the APIC TPR.
6450 */
6451 case 8:
6452 {
6453 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
6454 if (uNewCrX & ~(uint64_t)0xf)
6455 {
6456 Log(("Trying to set reserved CR8 bits (%#RX64)\n", uNewCrX));
6457 return iemRaiseGeneralProtectionFault0(pVCpu);
6458 }
6459
6460 if (!IEM_IS_IN_GUEST(pVCpu))
6461 { /* probable */ }
6462#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6463 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6464 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_USE_TPR_SHADOW))
6465 {
6466 /*
6467 * If the Mov-to-CR8 doesn't cause a VM-exit, bits 0:3 of the source operand
6468 * is copied to bits 7:4 of the VTPR. Bits 0:3 and bits 31:8 of the VTPR are
6469 * cleared. Following this the processor performs TPR virtualization.
6470 *
6471 * However, we should not perform TPR virtualization immediately here but
6472 * after this instruction has completed.
6473 *
6474 * See Intel spec. 29.3 "Virtualizing CR8-based TPR Accesses"
6475 * See Intel spec. 27.1 "Architectural State Before A VM-exit"
6476 */
6477 uint32_t const uTpr = (uNewCrX & 0xf) << 4;
6478 Log(("iemCImpl_load_Cr%#x: Virtualizing TPR (%#x) write\n", iCrReg, uTpr));
6479 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_TPR, uTpr);
6480 iemVmxVirtApicSetPendingWrite(pVCpu, XAPIC_OFF_TPR);
6481 rcStrict = VINF_SUCCESS;
6482 break;
6483 }
6484#endif
6485#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
6486 else if (pVCpu->iem.s.fExec & IEM_F_X86_CTX_SVM)
6487 {
6488 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 8))
6489 {
6490 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6491 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6492 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR8, enmAccessCrX, iGReg);
6493 }
6494
6495 pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl.IntCtrl.n.u8VTPR = uNewCrX;
6496 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, IEM_GET_CTX(pVCpu)))
6497 {
6498 rcStrict = VINF_SUCCESS;
6499 break;
6500 }
6501 }
6502#endif
6503 uint8_t const u8Tpr = (uint8_t)uNewCrX << 4;
6504 APICSetTpr(pVCpu, u8Tpr);
6505 rcStrict = VINF_SUCCESS;
6506 break;
6507 }
6508
6509 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
6510 }
6511
6512 /*
6513 * Advance the RIP on success.
6514 */
6515 if (RT_SUCCESS(rcStrict))
6516 {
6517 if (rcStrict != VINF_SUCCESS)
6518 iemSetPassUpStatus(pVCpu, rcStrict);
6519 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6520 }
6521
6522 return rcStrict;
6523}
6524
6525
6526/**
6527 * Implements mov CRx,GReg.
6528 *
6529 * @param iCrReg The CRx register to write (valid).
6530 * @param iGReg The general register to load the CRx value from.
6531 */
6532IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
6533{
6534 if (IEM_GET_CPL(pVCpu) != 0)
6535 return iemRaiseGeneralProtectionFault0(pVCpu);
6536 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6537
6538 /*
6539 * Read the new value from the source register and call common worker.
6540 */
6541 uint64_t uNewCrX;
6542 if (IEM_IS_64BIT_CODE(pVCpu))
6543 uNewCrX = iemGRegFetchU64(pVCpu, iGReg);
6544 else
6545 uNewCrX = iemGRegFetchU32(pVCpu, iGReg);
6546
6547#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6548 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6549 { /* probable */ }
6550 else
6551 {
6552 VBOXSTRICTRC rcStrict = VINF_VMX_INTERCEPT_NOT_ACTIVE;
6553 switch (iCrReg)
6554 {
6555 case 0:
6556 case 4: rcStrict = iemVmxVmexitInstrMovToCr0Cr4(pVCpu, iCrReg, &uNewCrX, iGReg, cbInstr); break;
6557 case 3: rcStrict = iemVmxVmexitInstrMovToCr3(pVCpu, uNewCrX, iGReg, cbInstr); break;
6558 case 8: rcStrict = iemVmxVmexitInstrMovToCr8(pVCpu, iGReg, cbInstr); break;
6559 }
6560 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6561 return rcStrict;
6562 }
6563#endif
6564
6565 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, iCrReg, uNewCrX, IEMACCESSCRX_MOV_CRX, iGReg);
6566}
6567
6568
6569/**
6570 * Implements 'LMSW r/m16'
6571 *
6572 * @param u16NewMsw The new value.
6573 * @param GCPtrEffDst The guest-linear address of the source operand in case
6574 * of a memory operand. For register operand, pass
6575 * NIL_RTGCPTR.
6576 */
6577IEM_CIMPL_DEF_2(iemCImpl_lmsw, uint16_t, u16NewMsw, RTGCPTR, GCPtrEffDst)
6578{
6579 if (IEM_GET_CPL(pVCpu) != 0)
6580 return iemRaiseGeneralProtectionFault0(pVCpu);
6581 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6582 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
6583
6584#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6585 /* Check nested-guest VMX intercept and get updated MSW if there's no VM-exit. */
6586 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6587 { /* probable */ }
6588 else
6589 {
6590 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrLmsw(pVCpu, pVCpu->cpum.GstCtx.cr0, &u16NewMsw, GCPtrEffDst, cbInstr);
6591 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6592 return rcStrict;
6593 }
6594#else
6595 RT_NOREF_PV(GCPtrEffDst);
6596#endif
6597
6598 /*
6599 * Compose the new CR0 value and call common worker.
6600 */
6601 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
6602 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
6603 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_LMSW, UINT8_MAX /* iGReg */);
6604}
6605
6606
6607/**
6608 * Implements 'CLTS'.
6609 */
6610IEM_CIMPL_DEF_0(iemCImpl_clts)
6611{
6612 if (IEM_GET_CPL(pVCpu) != 0)
6613 return iemRaiseGeneralProtectionFault0(pVCpu);
6614
6615 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
6616 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0;
6617 uNewCr0 &= ~X86_CR0_TS;
6618
6619#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6620 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6621 { /* probable */ }
6622 else
6623 {
6624 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrClts(pVCpu, cbInstr);
6625 if (rcStrict == VINF_VMX_MODIFIES_BEHAVIOR)
6626 uNewCr0 |= (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS);
6627 else if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6628 return rcStrict;
6629 }
6630#endif
6631
6632 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_CLTS, UINT8_MAX /* iGReg */);
6633}
6634
6635
6636/**
6637 * Implements mov GReg,DRx.
6638 *
6639 * @param iGReg The general register to store the DRx value in.
6640 * @param iDrReg The DRx register to read (0-7).
6641 */
6642IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
6643{
6644#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6645 /*
6646 * Check nested-guest VMX intercept.
6647 * Unlike most other intercepts, the Mov DRx intercept takes preceedence
6648 * over CPL and CR4.DE and even DR4/DR5 checks.
6649 *
6650 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
6651 */
6652 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6653 { /* probable */ }
6654 else
6655 {
6656 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovDrX(pVCpu, VMXINSTRID_MOV_FROM_DRX, iDrReg, iGReg, cbInstr);
6657 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6658 return rcStrict;
6659 }
6660#endif
6661
6662 /*
6663 * Check preconditions.
6664 */
6665 /* Raise GPs. */
6666 if (IEM_GET_CPL(pVCpu) != 0)
6667 return iemRaiseGeneralProtectionFault0(pVCpu);
6668 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6669 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
6670
6671 /** @todo \#UD in outside ring-0 too? */
6672 if (iDrReg == 4 || iDrReg == 5)
6673 {
6674 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR4);
6675 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE)
6676 {
6677 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
6678 return iemRaiseGeneralProtectionFault0(pVCpu);
6679 }
6680 iDrReg += 2;
6681 }
6682
6683 /* Raise #DB if general access detect is enabled. */
6684 if (pVCpu->cpum.GstCtx.dr[7] & X86_DR7_GD)
6685 {
6686 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
6687 return iemRaiseDebugException(pVCpu);
6688 }
6689
6690 /*
6691 * Read the debug register and store it in the specified general register.
6692 */
6693 uint64_t drX;
6694 switch (iDrReg)
6695 {
6696 case 0:
6697 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6698 drX = pVCpu->cpum.GstCtx.dr[0];
6699 break;
6700 case 1:
6701 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6702 drX = pVCpu->cpum.GstCtx.dr[1];
6703 break;
6704 case 2:
6705 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6706 drX = pVCpu->cpum.GstCtx.dr[2];
6707 break;
6708 case 3:
6709 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6710 drX = pVCpu->cpum.GstCtx.dr[3];
6711 break;
6712 case 6:
6713 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
6714 drX = pVCpu->cpum.GstCtx.dr[6];
6715 drX |= X86_DR6_RA1_MASK;
6716 drX &= ~X86_DR6_RAZ_MASK;
6717 break;
6718 case 7:
6719 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
6720 drX = pVCpu->cpum.GstCtx.dr[7];
6721 drX |=X86_DR7_RA1_MASK;
6722 drX &= ~X86_DR7_RAZ_MASK;
6723 break;
6724 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* caller checks */
6725 }
6726
6727 /** @todo SVM nested-guest intercept for DR8-DR15? */
6728 /*
6729 * Check for any SVM nested-guest intercepts for the DRx read.
6730 */
6731 if (!IEM_SVM_IS_READ_DR_INTERCEPT_SET(pVCpu, iDrReg))
6732 { /* probable */ }
6733 else
6734 {
6735 Log(("mov r%u,dr%u: Guest intercept -> #VMEXIT\n", iGReg, iDrReg));
6736 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6737 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_READ_DR0 + (iDrReg & 0xf),
6738 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
6739 }
6740
6741 if (IEM_IS_64BIT_CODE(pVCpu))
6742 iemGRegStoreU64(pVCpu, iGReg, drX);
6743 else
6744 iemGRegStoreU32(pVCpu, iGReg, (uint32_t)drX);
6745
6746 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6747}
6748
6749
6750/**
6751 * Implements mov DRx,GReg.
6752 *
6753 * @param iDrReg The DRx register to write (valid).
6754 * @param iGReg The general register to load the DRx value from.
6755 */
6756IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
6757{
6758#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6759 /*
6760 * Check nested-guest VMX intercept.
6761 * Unlike most other intercepts, the Mov DRx intercept takes preceedence
6762 * over CPL and CR4.DE and even DR4/DR5 checks.
6763 *
6764 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
6765 */
6766 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6767 { /* probable */ }
6768 else
6769 {
6770 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovDrX(pVCpu, VMXINSTRID_MOV_TO_DRX, iDrReg, iGReg, cbInstr);
6771 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6772 return rcStrict;
6773 }
6774#endif
6775
6776 /*
6777 * Check preconditions.
6778 */
6779 if (IEM_GET_CPL(pVCpu) != 0)
6780 return iemRaiseGeneralProtectionFault0(pVCpu);
6781 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6782 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
6783
6784 if (iDrReg == 4 || iDrReg == 5)
6785 {
6786 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR4);
6787 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE)
6788 {
6789 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
6790 return iemRaiseGeneralProtectionFault0(pVCpu);
6791 }
6792 iDrReg += 2;
6793 }
6794
6795 /* Raise #DB if general access detect is enabled. */
6796 /** @todo is \#DB/DR7.GD raised before any reserved high bits in DR7/DR6
6797 * \#GP? */
6798 if (pVCpu->cpum.GstCtx.dr[7] & X86_DR7_GD)
6799 {
6800 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
6801 return iemRaiseDebugException(pVCpu);
6802 }
6803
6804 /*
6805 * Read the new value from the source register.
6806 */
6807 uint64_t uNewDrX;
6808 if (IEM_IS_64BIT_CODE(pVCpu))
6809 uNewDrX = iemGRegFetchU64(pVCpu, iGReg);
6810 else
6811 uNewDrX = iemGRegFetchU32(pVCpu, iGReg);
6812
6813 /*
6814 * Adjust it.
6815 */
6816 switch (iDrReg)
6817 {
6818 case 0:
6819 case 1:
6820 case 2:
6821 case 3:
6822 /* nothing to adjust */
6823 break;
6824
6825 case 6:
6826 if (uNewDrX & X86_DR6_MBZ_MASK)
6827 {
6828 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
6829 return iemRaiseGeneralProtectionFault0(pVCpu);
6830 }
6831 uNewDrX |= X86_DR6_RA1_MASK;
6832 uNewDrX &= ~X86_DR6_RAZ_MASK;
6833 break;
6834
6835 case 7:
6836 if (uNewDrX & X86_DR7_MBZ_MASK)
6837 {
6838 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
6839 return iemRaiseGeneralProtectionFault0(pVCpu);
6840 }
6841 uNewDrX |= X86_DR7_RA1_MASK;
6842 uNewDrX &= ~X86_DR7_RAZ_MASK;
6843 break;
6844
6845 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6846 }
6847
6848 /** @todo SVM nested-guest intercept for DR8-DR15? */
6849 /*
6850 * Check for any SVM nested-guest intercepts for the DRx write.
6851 */
6852 if (!IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(pVCpu, iDrReg))
6853 { /* probable */ }
6854 else
6855 {
6856 Log2(("mov dr%u,r%u: Guest intercept -> #VMEXIT\n", iDrReg, iGReg));
6857 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6858 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_DR0 + (iDrReg & 0xf),
6859 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
6860 }
6861
6862 /*
6863 * Do the actual setting.
6864 */
6865 if (iDrReg < 4)
6866 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6867 else if (iDrReg == 6)
6868 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
6869
6870 int rc = CPUMSetGuestDRx(pVCpu, iDrReg, uNewDrX);
6871 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_IEM_IPE_1 : rc);
6872
6873 /*
6874 * Re-init hardware breakpoint summary if it was DR7 that got changed.
6875 */
6876 if (iDrReg == 7)
6877 iemRecalcExecDbgFlags(pVCpu);
6878
6879 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6880}
6881
6882
6883/**
6884 * Implements mov GReg,TRx.
6885 *
6886 * @param iGReg The general register to store the
6887 * TRx value in.
6888 * @param iTrReg The TRx register to read (6/7).
6889 */
6890IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Td, uint8_t, iGReg, uint8_t, iTrReg)
6891{
6892 /*
6893 * Check preconditions. NB: This instruction is 386/486 only.
6894 */
6895
6896 /* Raise GPs. */
6897 if (IEM_GET_CPL(pVCpu) != 0)
6898 return iemRaiseGeneralProtectionFault0(pVCpu);
6899 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6900
6901 if (iTrReg < 6 || iTrReg > 7)
6902 {
6903 /** @todo Do Intel CPUs reject this or are the TRs aliased? */
6904 Log(("mov r%u,tr%u: invalid register -> #GP(0)\n", iGReg, iTrReg));
6905 return iemRaiseGeneralProtectionFault0(pVCpu);
6906 }
6907
6908 /*
6909 * Read the test register and store it in the specified general register.
6910 * This is currently a dummy implementation that only exists to satisfy
6911 * old debuggers like WDEB386 or OS/2 KDB which unconditionally read the
6912 * TR6/TR7 registers. Software which actually depends on the TR values
6913 * (different on 386/486) is exceedingly rare.
6914 */
6915 uint32_t trX;
6916 switch (iTrReg)
6917 {
6918 case 6:
6919 trX = 0; /* Currently a dummy. */
6920 break;
6921 case 7:
6922 trX = 0; /* Currently a dummy. */
6923 break;
6924 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
6925 }
6926
6927 iemGRegStoreU32(pVCpu, iGReg, trX);
6928
6929 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6930}
6931
6932
6933/**
6934 * Implements mov TRx,GReg.
6935 *
6936 * @param iTrReg The TRx register to write (valid).
6937 * @param iGReg The general register to load the TRx
6938 * value from.
6939 */
6940IEM_CIMPL_DEF_2(iemCImpl_mov_Td_Rd, uint8_t, iTrReg, uint8_t, iGReg)
6941{
6942 /*
6943 * Check preconditions. NB: This instruction is 386/486 only.
6944 */
6945
6946 /* Raise GPs. */
6947 if (IEM_GET_CPL(pVCpu) != 0)
6948 return iemRaiseGeneralProtectionFault0(pVCpu);
6949 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6950
6951 if (iTrReg < 6 || iTrReg > 7)
6952 {
6953 /** @todo Do Intel CPUs reject this or are the TRs aliased? */
6954 Log(("mov r%u,tr%u: invalid register -> #GP(0)\n", iGReg, iTrReg));
6955 return iemRaiseGeneralProtectionFault0(pVCpu);
6956 }
6957
6958 /*
6959 * Read the new value from the source register.
6960 */
6961 uint32_t uNewTrX = iemGRegFetchU32(pVCpu, iGReg);
6962
6963 /*
6964 * Here we would do the actual setting if this weren't a dummy implementation.
6965 * This is currently a dummy implementation that only exists to prevent
6966 * old debuggers like WDEB386 or OS/2 KDB from crashing.
6967 */
6968 RT_NOREF(uNewTrX);
6969
6970 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6971}
6972
6973
6974/**
6975 * Implements 'INVLPG m'.
6976 *
6977 * @param GCPtrPage The effective address of the page to invalidate.
6978 * @remarks Updates the RIP.
6979 */
6980IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
6981{
6982 /* ring-0 only. */
6983 if (IEM_GET_CPL(pVCpu) != 0)
6984 return iemRaiseGeneralProtectionFault0(pVCpu);
6985 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6986 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
6987
6988 if (!IEM_IS_IN_GUEST(pVCpu))
6989 { /* probable */ }
6990#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6991 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6992 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INVLPG_EXIT))
6993 {
6994 Log(("invlpg: Guest intercept (%RGp) -> VM-exit\n", GCPtrPage));
6995 return iemVmxVmexitInstrInvlpg(pVCpu, GCPtrPage, cbInstr);
6996 }
6997#endif
6998 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPG))
6999 {
7000 Log(("invlpg: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
7001 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7002 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_INVLPG,
7003 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? GCPtrPage : 0, 0 /* uExitInfo2 */);
7004 }
7005
7006 int rc = PGMInvalidatePage(pVCpu, GCPtrPage);
7007 if (rc == VINF_SUCCESS)
7008 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7009 if (rc == VINF_PGM_SYNC_CR3)
7010 {
7011 iemSetPassUpStatus(pVCpu, rc);
7012 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7013 }
7014
7015 AssertMsg(RT_FAILURE_NP(rc), ("%Rrc\n", rc));
7016 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
7017 return rc;
7018}
7019
7020
7021/**
7022 * Implements INVPCID.
7023 *
7024 * @param iEffSeg The segment of the invpcid descriptor.
7025 * @param GCPtrInvpcidDesc The address of invpcid descriptor.
7026 * @param uInvpcidType The invalidation type.
7027 * @remarks Updates the RIP.
7028 */
7029IEM_CIMPL_DEF_3(iemCImpl_invpcid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvpcidDesc, uint64_t, uInvpcidType)
7030{
7031 /*
7032 * Check preconditions.
7033 */
7034 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fInvpcid)
7035 return iemRaiseUndefinedOpcode(pVCpu);
7036
7037 /* When in VMX non-root mode and INVPCID is not enabled, it results in #UD. */
7038 if (RT_LIKELY( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7039 || IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_INVPCID)))
7040 { /* likely */ }
7041 else
7042 {
7043 Log(("invpcid: Not enabled for nested-guest execution -> #UD\n"));
7044 return iemRaiseUndefinedOpcode(pVCpu);
7045 }
7046
7047 if (IEM_GET_CPL(pVCpu) != 0)
7048 {
7049 Log(("invpcid: CPL != 0 -> #GP(0)\n"));
7050 return iemRaiseGeneralProtectionFault0(pVCpu);
7051 }
7052
7053 if (IEM_IS_V86_MODE(pVCpu))
7054 {
7055 Log(("invpcid: v8086 mode -> #GP(0)\n"));
7056 return iemRaiseGeneralProtectionFault0(pVCpu);
7057 }
7058
7059 /*
7060 * Check nested-guest intercept.
7061 *
7062 * INVPCID causes a VM-exit if "enable INVPCID" and "INVLPG exiting" are
7063 * both set. We have already checked the former earlier in this function.
7064 *
7065 * CPL and virtual-8086 mode checks take priority over this VM-exit.
7066 * See Intel spec. "25.1.1 Relative Priority of Faults and VM Exits".
7067 */
7068 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7069 || !IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INVLPG_EXIT))
7070 { /* probable */ }
7071 else
7072 {
7073 Log(("invpcid: Guest intercept -> #VM-exit\n"));
7074 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_INVPCID, VMXINSTRID_NONE, cbInstr);
7075 }
7076
7077 if (uInvpcidType > X86_INVPCID_TYPE_MAX_VALID)
7078 {
7079 Log(("invpcid: invalid/unrecognized invpcid type %#RX64 -> #GP(0)\n", uInvpcidType));
7080 return iemRaiseGeneralProtectionFault0(pVCpu);
7081 }
7082 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
7083
7084 /*
7085 * Fetch the invpcid descriptor from guest memory.
7086 */
7087 RTUINT128U uDesc;
7088 VBOXSTRICTRC rcStrict = iemMemFetchDataU128(pVCpu, &uDesc, iEffSeg, GCPtrInvpcidDesc);
7089 if (rcStrict == VINF_SUCCESS)
7090 {
7091 /*
7092 * Validate the descriptor.
7093 */
7094 if (uDesc.s.Lo > 0xfff)
7095 {
7096 Log(("invpcid: reserved bits set in invpcid descriptor %#RX64 -> #GP(0)\n", uDesc.s.Lo));
7097 return iemRaiseGeneralProtectionFault0(pVCpu);
7098 }
7099
7100 RTGCUINTPTR64 const GCPtrInvAddr = uDesc.s.Hi;
7101 uint8_t const uPcid = uDesc.s.Lo & UINT64_C(0xfff);
7102 uint32_t const uCr4 = pVCpu->cpum.GstCtx.cr4;
7103 uint64_t const uCr3 = pVCpu->cpum.GstCtx.cr3;
7104 switch (uInvpcidType)
7105 {
7106 case X86_INVPCID_TYPE_INDV_ADDR:
7107 {
7108 if (!IEM_IS_CANONICAL(GCPtrInvAddr))
7109 {
7110 Log(("invpcid: invalidation address %#RGP is not canonical -> #GP(0)\n", GCPtrInvAddr));
7111 return iemRaiseGeneralProtectionFault0(pVCpu);
7112 }
7113 if ( !(uCr4 & X86_CR4_PCIDE)
7114 && uPcid != 0)
7115 {
7116 Log(("invpcid: invalid pcid %#x\n", uPcid));
7117 return iemRaiseGeneralProtectionFault0(pVCpu);
7118 }
7119
7120 /* Invalidate mappings for the linear address tagged with PCID except global translations. */
7121 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
7122 break;
7123 }
7124
7125 case X86_INVPCID_TYPE_SINGLE_CONTEXT:
7126 {
7127 if ( !(uCr4 & X86_CR4_PCIDE)
7128 && uPcid != 0)
7129 {
7130 Log(("invpcid: invalid pcid %#x\n", uPcid));
7131 return iemRaiseGeneralProtectionFault0(pVCpu);
7132 }
7133 /* Invalidate all mappings associated with PCID except global translations. */
7134 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
7135 break;
7136 }
7137
7138 case X86_INVPCID_TYPE_ALL_CONTEXT_INCL_GLOBAL:
7139 {
7140 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
7141 break;
7142 }
7143
7144 case X86_INVPCID_TYPE_ALL_CONTEXT_EXCL_GLOBAL:
7145 {
7146 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
7147 break;
7148 }
7149 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7150 }
7151 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7152 }
7153 return rcStrict;
7154}
7155
7156
7157/**
7158 * Implements INVD.
7159 */
7160IEM_CIMPL_DEF_0(iemCImpl_invd)
7161{
7162 if (IEM_GET_CPL(pVCpu) != 0)
7163 {
7164 Log(("invd: CPL != 0 -> #GP(0)\n"));
7165 return iemRaiseGeneralProtectionFault0(pVCpu);
7166 }
7167
7168 if (!IEM_IS_IN_GUEST(pVCpu))
7169 { /* probable */ }
7170 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7171 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_INVD, cbInstr);
7172 else
7173 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_INVD, SVM_EXIT_INVD, 0, 0, cbInstr);
7174
7175 /* We currently take no action here. */
7176 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7177}
7178
7179
7180/**
7181 * Implements WBINVD.
7182 */
7183IEM_CIMPL_DEF_0(iemCImpl_wbinvd)
7184{
7185 if (IEM_GET_CPL(pVCpu) != 0)
7186 {
7187 Log(("wbinvd: CPL != 0 -> #GP(0)\n"));
7188 return iemRaiseGeneralProtectionFault0(pVCpu);
7189 }
7190
7191 if (!IEM_IS_IN_GUEST(pVCpu))
7192 { /* probable */ }
7193 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7194 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_WBINVD, cbInstr);
7195 else
7196 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_WBINVD, SVM_EXIT_WBINVD, 0, 0, cbInstr);
7197
7198 /* We currently take no action here. */
7199 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7200}
7201
7202
7203/** Opcode 0x0f 0xaa. */
7204IEM_CIMPL_DEF_0(iemCImpl_rsm)
7205{
7206 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RSM, SVM_EXIT_RSM, 0, 0, cbInstr);
7207 NOREF(cbInstr);
7208 return iemRaiseUndefinedOpcode(pVCpu);
7209}
7210
7211
7212/**
7213 * Implements RDTSC.
7214 */
7215IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
7216{
7217 /*
7218 * Check preconditions.
7219 */
7220 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fTsc)
7221 return iemRaiseUndefinedOpcode(pVCpu);
7222
7223 if (IEM_GET_CPL(pVCpu) != 0)
7224 {
7225 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
7226 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_TSD)
7227 {
7228 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
7229 return iemRaiseGeneralProtectionFault0(pVCpu);
7230 }
7231 }
7232
7233 if (!IEM_IS_IN_GUEST(pVCpu))
7234 { /* probable */ }
7235 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7236 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDTSC_EXIT))
7237 {
7238 Log(("rdtsc: Guest intercept -> VM-exit\n"));
7239 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDTSC, cbInstr);
7240 }
7241 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSC))
7242 {
7243 Log(("rdtsc: Guest intercept -> #VMEXIT\n"));
7244 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7245 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDTSC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7246 }
7247
7248 /*
7249 * Do the job.
7250 */
7251 uint64_t uTicks = TMCpuTickGet(pVCpu);
7252#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
7253 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
7254#endif
7255 pVCpu->cpum.GstCtx.rax = RT_LO_U32(uTicks);
7256 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(uTicks);
7257 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX); /* For IEMExecDecodedRdtsc. */
7258 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7259}
7260
7261
7262/**
7263 * Implements RDTSC.
7264 */
7265IEM_CIMPL_DEF_0(iemCImpl_rdtscp)
7266{
7267 /*
7268 * Check preconditions.
7269 */
7270 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fRdTscP)
7271 return iemRaiseUndefinedOpcode(pVCpu);
7272
7273 if (RT_LIKELY( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7274 || IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_RDTSCP)))
7275 { /* likely */ }
7276 else
7277 {
7278 Log(("rdtscp: Not enabled for VMX non-root mode -> #UD\n"));
7279 return iemRaiseUndefinedOpcode(pVCpu);
7280 }
7281
7282 if (IEM_GET_CPL(pVCpu) != 0)
7283 {
7284 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
7285 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_TSD)
7286 {
7287 Log(("rdtscp: CR4.TSD and CPL=%u -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
7288 return iemRaiseGeneralProtectionFault0(pVCpu);
7289 }
7290 }
7291
7292 if (!IEM_IS_IN_GUEST(pVCpu))
7293 { /* probable */ }
7294 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7295 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDTSC_EXIT))
7296 {
7297 Log(("rdtscp: Guest intercept -> VM-exit\n"));
7298 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDTSCP, cbInstr);
7299 }
7300 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP))
7301 {
7302 Log(("rdtscp: Guest intercept -> #VMEXIT\n"));
7303 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7304 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDTSCP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7305 }
7306
7307 /*
7308 * Do the job.
7309 * Query the MSR first in case of trips to ring-3.
7310 */
7311 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TSC_AUX);
7312 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pVCpu->cpum.GstCtx.rcx);
7313 if (rcStrict == VINF_SUCCESS)
7314 {
7315 /* Low dword of the TSC_AUX msr only. */
7316 pVCpu->cpum.GstCtx.rcx &= UINT32_C(0xffffffff);
7317
7318 uint64_t uTicks = TMCpuTickGet(pVCpu);
7319#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
7320 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
7321#endif
7322 pVCpu->cpum.GstCtx.rax = RT_LO_U32(uTicks);
7323 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(uTicks);
7324 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX); /* For IEMExecDecodedRdtscp. */
7325 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7326 }
7327 return rcStrict;
7328}
7329
7330
7331/**
7332 * Implements RDPMC.
7333 */
7334IEM_CIMPL_DEF_0(iemCImpl_rdpmc)
7335{
7336 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
7337
7338 if ( IEM_GET_CPL(pVCpu) != 0
7339 && !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCE))
7340 return iemRaiseGeneralProtectionFault0(pVCpu);
7341
7342 if (!IEM_IS_IN_GUEST(pVCpu))
7343 { /* probable */ }
7344 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7345 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDPMC_EXIT))
7346 {
7347 Log(("rdpmc: Guest intercept -> VM-exit\n"));
7348 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDPMC, cbInstr);
7349 }
7350 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDPMC))
7351 {
7352 Log(("rdpmc: Guest intercept -> #VMEXIT\n"));
7353 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7354 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDPMC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7355 }
7356
7357 /** @todo Emulate performance counters, for now just return 0. */
7358 pVCpu->cpum.GstCtx.rax = 0;
7359 pVCpu->cpum.GstCtx.rdx = 0;
7360 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
7361 /** @todo We should trigger a \#GP here if the CPU doesn't support the index in
7362 * ecx but see @bugref{3472}! */
7363
7364 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7365}
7366
7367
7368/**
7369 * Implements RDMSR.
7370 */
7371IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
7372{
7373 /*
7374 * Check preconditions.
7375 */
7376 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr)
7377 return iemRaiseUndefinedOpcode(pVCpu);
7378 if (IEM_GET_CPL(pVCpu) != 0)
7379 return iemRaiseGeneralProtectionFault0(pVCpu);
7380
7381 /*
7382 * Check nested-guest intercepts.
7383 */
7384 if (!IEM_IS_IN_GUEST(pVCpu))
7385 { /* probable */ }
7386#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7387 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7388 {
7389 if (iemVmxIsRdmsrWrmsrInterceptSet(pVCpu, VMX_EXIT_RDMSR, pVCpu->cpum.GstCtx.ecx))
7390 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDMSR, cbInstr);
7391 }
7392#endif
7393#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7394 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
7395 {
7396 VBOXSTRICTRC rcStrict = iemSvmHandleMsrIntercept(pVCpu, pVCpu->cpum.GstCtx.ecx, false /* fWrite */, cbInstr);
7397 if (rcStrict == VINF_SVM_VMEXIT)
7398 return VINF_SUCCESS;
7399 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7400 {
7401 Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", pVCpu->cpum.GstCtx.ecx, VBOXSTRICTRC_VAL(rcStrict)));
7402 return rcStrict;
7403 }
7404 }
7405#endif
7406
7407 /*
7408 * Do the job.
7409 */
7410 RTUINT64U uValue;
7411 /** @todo make CPUMAllMsrs.cpp import the necessary MSR state. */
7412 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);
7413
7414 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pVCpu->cpum.GstCtx.ecx, &uValue.u);
7415 if (rcStrict == VINF_SUCCESS)
7416 {
7417 pVCpu->cpum.GstCtx.rax = uValue.s.Lo;
7418 pVCpu->cpum.GstCtx.rdx = uValue.s.Hi;
7419 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
7420
7421 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7422 }
7423
7424#ifndef IN_RING3
7425 /* Deferred to ring-3. */
7426 if (rcStrict == VINF_CPUM_R3_MSR_READ)
7427 {
7428 Log(("IEM: rdmsr(%#x) -> ring-3\n", pVCpu->cpum.GstCtx.ecx));
7429 return rcStrict;
7430 }
7431#endif
7432
7433 /* Often a unimplemented MSR or MSR bit, so worth logging. */
7434 if (pVCpu->iem.s.cLogRelRdMsr < 32)
7435 {
7436 pVCpu->iem.s.cLogRelRdMsr++;
7437 LogRel(("IEM: rdmsr(%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.ecx));
7438 }
7439 else
7440 Log(( "IEM: rdmsr(%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.ecx));
7441 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
7442 return iemRaiseGeneralProtectionFault0(pVCpu);
7443}
7444
7445
7446/**
7447 * Implements WRMSR.
7448 */
7449IEM_CIMPL_DEF_0(iemCImpl_wrmsr)
7450{
7451 /*
7452 * Check preconditions.
7453 */
7454 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr)
7455 return iemRaiseUndefinedOpcode(pVCpu);
7456 if (IEM_GET_CPL(pVCpu) != 0)
7457 return iemRaiseGeneralProtectionFault0(pVCpu);
7458
7459 RTUINT64U uValue;
7460 uValue.s.Lo = pVCpu->cpum.GstCtx.eax;
7461 uValue.s.Hi = pVCpu->cpum.GstCtx.edx;
7462
7463 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7464
7465 /** @todo make CPUMAllMsrs.cpp import the necessary MSR state. */
7466 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);
7467
7468 /*
7469 * Check nested-guest intercepts.
7470 */
7471 if (!IEM_IS_IN_GUEST(pVCpu))
7472 { /* probable */ }
7473#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7474 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7475 {
7476 if (iemVmxIsRdmsrWrmsrInterceptSet(pVCpu, VMX_EXIT_WRMSR, idMsr))
7477 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_WRMSR, cbInstr);
7478 }
7479#endif
7480#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7481 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
7482 {
7483 VBOXSTRICTRC rcStrict = iemSvmHandleMsrIntercept(pVCpu, idMsr, true /* fWrite */, cbInstr);
7484 if (rcStrict == VINF_SVM_VMEXIT)
7485 return VINF_SUCCESS;
7486 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7487 {
7488 Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", idMsr, VBOXSTRICTRC_VAL(rcStrict)));
7489 return rcStrict;
7490 }
7491 }
7492#endif
7493
7494 /*
7495 * Do the job.
7496 */
7497 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, idMsr, uValue.u);
7498 if (rcStrict == VINF_SUCCESS)
7499 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7500
7501#ifndef IN_RING3
7502 /* Deferred to ring-3. */
7503 if (rcStrict == VINF_CPUM_R3_MSR_WRITE)
7504 {
7505 Log(("IEM: wrmsr(%#x) -> ring-3\n", idMsr));
7506 return rcStrict;
7507 }
7508#endif
7509
7510 /* Often a unimplemented MSR or MSR bit, so worth logging. */
7511 if (pVCpu->iem.s.cLogRelWrMsr < 32)
7512 {
7513 pVCpu->iem.s.cLogRelWrMsr++;
7514 LogRel(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", idMsr, uValue.s.Hi, uValue.s.Lo));
7515 }
7516 else
7517 Log(( "IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", idMsr, uValue.s.Hi, uValue.s.Lo));
7518 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
7519 return iemRaiseGeneralProtectionFault0(pVCpu);
7520}
7521
7522
7523/**
7524 * Implements 'IN eAX, port'.
7525 *
7526 * @param u16Port The source port.
7527 * @param cbReg The register size.
7528 * @param bImmAndEffAddrMode Bit 7: Whether the port was specified through an
7529 * immediate operand or the implicit DX register.
7530 * Bits 3-0: Effective address mode.
7531 */
7532IEM_CIMPL_DEF_3(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg, uint8_t, bImmAndEffAddrMode)
7533{
7534 /*
7535 * CPL check
7536 */
7537 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, cbReg);
7538 if (rcStrict != VINF_SUCCESS)
7539 return rcStrict;
7540
7541 if (!IEM_IS_IN_GUEST(pVCpu))
7542 { /* probable */ }
7543
7544 /*
7545 * Check VMX nested-guest IO intercept.
7546 */
7547#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7548 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7549 {
7550 rcStrict = iemVmxVmexitInstrIo(pVCpu, VMXINSTRID_IO_IN, u16Port, RT_BOOL(bImmAndEffAddrMode & 0x80), cbReg, cbInstr);
7551 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
7552 return rcStrict;
7553 }
7554#endif
7555
7556 /*
7557 * Check SVM nested-guest IO intercept.
7558 */
7559#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7560 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
7561 {
7562 uint8_t cAddrSizeBits;
7563 switch (bImmAndEffAddrMode & 0xf)
7564 {
7565 case IEMMODE_16BIT: cAddrSizeBits = 16; break;
7566 case IEMMODE_32BIT: cAddrSizeBits = 32; break;
7567 case IEMMODE_64BIT: cAddrSizeBits = 64; break;
7568 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7569 }
7570 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, cbReg, cAddrSizeBits, 0 /* N/A - iEffSeg */,
7571 false /* fRep */, false /* fStrIo */, cbInstr);
7572 if (rcStrict == VINF_SVM_VMEXIT)
7573 return VINF_SUCCESS;
7574 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7575 {
7576 Log(("iemCImpl_in: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
7577 VBOXSTRICTRC_VAL(rcStrict)));
7578 return rcStrict;
7579 }
7580 }
7581#endif
7582#if !defined(VBOX_WITH_NESTED_HWVIRT_VMX) && !defined(VBOX_WITH_NESTED_HWVIRT_SVM)
7583 RT_NOREF(bImmAndEffAddrMode);
7584#endif
7585
7586 /*
7587 * Perform the I/O.
7588 */
7589 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
7590 uint32_t u32Value = 0;
7591 rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, cbReg);
7592 if (IOM_SUCCESS(rcStrict))
7593 {
7594 switch (cbReg)
7595 {
7596 case 1: pVCpu->cpum.GstCtx.al = (uint8_t)u32Value; break;
7597 case 2: pVCpu->cpum.GstCtx.ax = (uint16_t)u32Value; break;
7598 case 4: pVCpu->cpum.GstCtx.rax = u32Value; break;
7599 default: AssertFailedReturn(VERR_IEM_IPE_3);
7600 }
7601
7602 pVCpu->iem.s.cPotentialExits++;
7603 if (rcStrict != VINF_SUCCESS)
7604 iemSetPassUpStatus(pVCpu, rcStrict);
7605
7606 /*
7607 * Check for I/O breakpoints before we complete the instruction.
7608 */
7609 uint32_t const fDr7 = pVCpu->cpum.GstCtx.dr[7];
7610 if (RT_UNLIKELY( ( ( (fDr7 & X86_DR7_ENABLED_MASK)
7611 && X86_DR7_ANY_RW_IO(fDr7)
7612 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE))
7613 || pVM->dbgf.ro.cEnabledHwIoBreakpoints > 0)
7614 && rcStrict == VINF_SUCCESS))
7615 {
7616 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR6);
7617 pVCpu->cpum.GstCtx.eflags.uBoth |= DBGFBpCheckIo2(pVM, pVCpu, u16Port, cbReg);
7618 }
7619
7620 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7621 }
7622
7623 return rcStrict;
7624}
7625
7626
7627/**
7628 * Implements 'IN eAX, DX'.
7629 *
7630 * @param cbReg The register size.
7631 * @param enmEffAddrMode Effective address mode.
7632 */
7633IEM_CIMPL_DEF_2(iemCImpl_in_eAX_DX, uint8_t, cbReg, IEMMODE, enmEffAddrMode)
7634{
7635 return IEM_CIMPL_CALL_3(iemCImpl_in, pVCpu->cpum.GstCtx.dx, cbReg, 0 /* fImm */ | enmEffAddrMode);
7636}
7637
7638
7639/**
7640 * Implements 'OUT port, eAX'.
7641 *
7642 * @param u16Port The destination port.
7643 * @param cbReg The register size.
7644 * @param bImmAndEffAddrMode Bit 7: Whether the port was specified through an
7645 * immediate operand or the implicit DX register.
7646 * Bits 3-0: Effective address mode.
7647 */
7648IEM_CIMPL_DEF_3(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg, uint8_t, bImmAndEffAddrMode)
7649{
7650 /*
7651 * CPL check
7652 */
7653 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, cbReg);
7654 if (rcStrict != VINF_SUCCESS)
7655 return rcStrict;
7656
7657 if (!IEM_IS_IN_GUEST(pVCpu))
7658 { /* probable */ }
7659
7660 /*
7661 * Check VMX nested-guest I/O intercept.
7662 */
7663#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7664 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7665 {
7666 rcStrict = iemVmxVmexitInstrIo(pVCpu, VMXINSTRID_IO_OUT, u16Port, RT_BOOL(bImmAndEffAddrMode & 0x80), cbReg, cbInstr);
7667 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
7668 return rcStrict;
7669 }
7670#endif
7671
7672 /*
7673 * Check SVM nested-guest I/O intercept.
7674 */
7675#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7676 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
7677 {
7678 uint8_t cAddrSizeBits;
7679 switch (bImmAndEffAddrMode & 0xf)
7680 {
7681 case IEMMODE_16BIT: cAddrSizeBits = 16; break;
7682 case IEMMODE_32BIT: cAddrSizeBits = 32; break;
7683 case IEMMODE_64BIT: cAddrSizeBits = 64; break;
7684 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7685 }
7686 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, cbReg, cAddrSizeBits, 0 /* N/A - iEffSeg */,
7687 false /* fRep */, false /* fStrIo */, cbInstr);
7688 if (rcStrict == VINF_SVM_VMEXIT)
7689 return VINF_SUCCESS;
7690 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7691 {
7692 Log(("iemCImpl_out: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
7693 VBOXSTRICTRC_VAL(rcStrict)));
7694 return rcStrict;
7695 }
7696 }
7697#endif
7698#if !defined(VBOX_WITH_NESTED_HWVIRT_VMX) && !defined(VBOX_WITH_NESTED_HWVIRT_SVM)
7699 RT_NOREF(bImmAndEffAddrMode);
7700#endif
7701
7702 /*
7703 * Perform the I/O.
7704 */
7705 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
7706 uint32_t u32Value;
7707 switch (cbReg)
7708 {
7709 case 1: u32Value = pVCpu->cpum.GstCtx.al; break;
7710 case 2: u32Value = pVCpu->cpum.GstCtx.ax; break;
7711 case 4: u32Value = pVCpu->cpum.GstCtx.eax; break;
7712 default: AssertFailedReturn(VERR_IEM_IPE_4);
7713 }
7714 rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, u32Value, cbReg);
7715 if (IOM_SUCCESS(rcStrict))
7716 {
7717 pVCpu->iem.s.cPotentialExits++;
7718 if (rcStrict != VINF_SUCCESS)
7719 iemSetPassUpStatus(pVCpu, rcStrict);
7720
7721 /*
7722 * Check for I/O breakpoints before we complete the instruction.
7723 */
7724 uint32_t const fDr7 = pVCpu->cpum.GstCtx.dr[7];
7725 if (RT_UNLIKELY( ( ( (fDr7 & X86_DR7_ENABLED_MASK)
7726 && X86_DR7_ANY_RW_IO(fDr7)
7727 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE))
7728 || pVM->dbgf.ro.cEnabledHwIoBreakpoints > 0)
7729 && rcStrict == VINF_SUCCESS))
7730 {
7731 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR6);
7732 pVCpu->cpum.GstCtx.eflags.uBoth |= DBGFBpCheckIo2(pVM, pVCpu, u16Port, cbReg);
7733 }
7734
7735 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7736 }
7737 return rcStrict;
7738}
7739
7740
7741/**
7742 * Implements 'OUT DX, eAX'.
7743 *
7744 * @param cbReg The register size.
7745 * @param enmEffAddrMode Effective address mode.
7746 */
7747IEM_CIMPL_DEF_2(iemCImpl_out_DX_eAX, uint8_t, cbReg, IEMMODE, enmEffAddrMode)
7748{
7749 return IEM_CIMPL_CALL_3(iemCImpl_out, pVCpu->cpum.GstCtx.dx, cbReg, 0 /* fImm */ | enmEffAddrMode);
7750}
7751
7752
7753/**
7754 * Implements 'CLI'.
7755 */
7756IEM_CIMPL_DEF_0(iemCImpl_cli)
7757{
7758 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
7759#ifdef LOG_ENABLED
7760 uint32_t const fEflOld = fEfl;
7761#endif
7762
7763 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);
7764 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
7765 {
7766 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
7767 if (!(fEfl & X86_EFL_VM))
7768 {
7769 if (IEM_GET_CPL(pVCpu) <= uIopl)
7770 fEfl &= ~X86_EFL_IF;
7771 else if ( IEM_GET_CPL(pVCpu) == 3
7772 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PVI) )
7773 fEfl &= ~X86_EFL_VIF;
7774 else
7775 return iemRaiseGeneralProtectionFault0(pVCpu);
7776 }
7777 /* V8086 */
7778 else if (uIopl == 3)
7779 fEfl &= ~X86_EFL_IF;
7780 else if ( uIopl < 3
7781 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME) )
7782 fEfl &= ~X86_EFL_VIF;
7783 else
7784 return iemRaiseGeneralProtectionFault0(pVCpu);
7785 }
7786 /* real mode */
7787 else
7788 fEfl &= ~X86_EFL_IF;
7789
7790 /* Commit. */
7791 IEMMISC_SET_EFL(pVCpu, fEfl);
7792 VBOXSTRICTRC const rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7793 Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl));
7794 return rcStrict;
7795}
7796
7797
7798/**
7799 * Implements 'STI'.
7800 */
7801IEM_CIMPL_DEF_0(iemCImpl_sti)
7802{
7803 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
7804 uint32_t const fEflOld = fEfl;
7805
7806 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);
7807 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
7808 {
7809 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
7810 if (!(fEfl & X86_EFL_VM))
7811 {
7812 if (IEM_GET_CPL(pVCpu) <= uIopl)
7813 fEfl |= X86_EFL_IF;
7814 else if ( IEM_GET_CPL(pVCpu) == 3
7815 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PVI)
7816 && !(fEfl & X86_EFL_VIP) )
7817 fEfl |= X86_EFL_VIF;
7818 else
7819 return iemRaiseGeneralProtectionFault0(pVCpu);
7820 }
7821 /* V8086 */
7822 else if (uIopl == 3)
7823 fEfl |= X86_EFL_IF;
7824 else if ( uIopl < 3
7825 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME)
7826 && !(fEfl & X86_EFL_VIP) )
7827 fEfl |= X86_EFL_VIF;
7828 else
7829 return iemRaiseGeneralProtectionFault0(pVCpu);
7830 }
7831 /* real mode */
7832 else
7833 fEfl |= X86_EFL_IF;
7834
7835 /*
7836 * Commit.
7837 *
7838 * Note! Setting the shadow interrupt flag must be done after RIP updating.
7839 */
7840 IEMMISC_SET_EFL(pVCpu, fEfl);
7841 VBOXSTRICTRC const rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7842 if (!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF))
7843 {
7844 /** @todo only set it the shadow flag if it was clear before? */
7845 CPUMSetInInterruptShadowSti(&pVCpu->cpum.GstCtx);
7846 }
7847 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl));
7848 return rcStrict;
7849}
7850
7851
7852/**
7853 * Implements 'HLT'.
7854 */
7855IEM_CIMPL_DEF_0(iemCImpl_hlt)
7856{
7857 if (IEM_GET_CPL(pVCpu) != 0)
7858 return iemRaiseGeneralProtectionFault0(pVCpu);
7859
7860 if (!IEM_IS_IN_GUEST(pVCpu))
7861 { /* probable */ }
7862 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7863 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_HLT_EXIT))
7864 {
7865 Log2(("hlt: Guest intercept -> VM-exit\n"));
7866 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_HLT, cbInstr);
7867 }
7868 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_HLT))
7869 {
7870 Log2(("hlt: Guest intercept -> #VMEXIT\n"));
7871 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7872 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_HLT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7873 }
7874
7875 /** @todo finish: This ASSUMES that iemRegAddToRipAndFinishingClearingRF won't
7876 * be returning any status codes relating to non-guest events being raised, as
7877 * we'll mess up the guest HALT otherwise. */
7878 VBOXSTRICTRC rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7879 if (rcStrict == VINF_SUCCESS)
7880 rcStrict = VINF_EM_HALT;
7881 return rcStrict;
7882}
7883
7884
7885/**
7886 * Implements 'MONITOR'.
7887 */
7888IEM_CIMPL_DEF_1(iemCImpl_monitor, uint8_t, iEffSeg)
7889{
7890 /*
7891 * Permission checks.
7892 */
7893 if (IEM_GET_CPL(pVCpu) != 0)
7894 {
7895 Log2(("monitor: CPL != 0\n"));
7896 return iemRaiseUndefinedOpcode(pVCpu); /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. */
7897 }
7898 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait)
7899 {
7900 Log2(("monitor: Not in CPUID\n"));
7901 return iemRaiseUndefinedOpcode(pVCpu);
7902 }
7903
7904 /*
7905 * Check VMX guest-intercept.
7906 * This should be considered a fault-like VM-exit.
7907 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
7908 */
7909 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7910 || !IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_MONITOR_EXIT))
7911 { /* probable */ }
7912 else
7913 {
7914 Log2(("monitor: Guest intercept -> #VMEXIT\n"));
7915 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_MONITOR, cbInstr);
7916 }
7917
7918 /*
7919 * Gather the operands and validate them.
7920 */
7921 RTGCPTR GCPtrMem = IEM_IS_64BIT_CODE(pVCpu) ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
7922 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx;
7923 uint32_t uEdx = pVCpu->cpum.GstCtx.edx;
7924/** @todo Test whether EAX or ECX is processed first, i.e. do we get \#PF or
7925 * \#GP first. */
7926 if (uEcx != 0)
7927 {
7928 Log2(("monitor rax=%RX64, ecx=%RX32, edx=%RX32; ECX != 0 -> #GP(0)\n", GCPtrMem, uEcx, uEdx)); NOREF(uEdx);
7929 return iemRaiseGeneralProtectionFault0(pVCpu);
7930 }
7931
7932 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrMem);
7933 if (rcStrict != VINF_SUCCESS)
7934 return rcStrict;
7935
7936 RTGCPHYS GCPhysMem;
7937 /** @todo access size */
7938 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, 1, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
7939 if (rcStrict != VINF_SUCCESS)
7940 return rcStrict;
7941
7942 if (!IEM_IS_IN_GUEST(pVCpu))
7943 { /* probable */ }
7944#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7945 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7946 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
7947 {
7948 /*
7949 * MONITOR does not access the memory, just monitors the address. However,
7950 * if the address falls in the APIC-access page, the address monitored must
7951 * instead be the corresponding address in the virtual-APIC page.
7952 *
7953 * See Intel spec. 29.4.4 "Instruction-Specific Considerations".
7954 */
7955 rcStrict = iemVmxVirtApicAccessUnused(pVCpu, &GCPhysMem, 1, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA);
7956 if ( rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE
7957 && rcStrict != VINF_VMX_MODIFIES_BEHAVIOR)
7958 return rcStrict;
7959 }
7960#endif
7961 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MONITOR))
7962 {
7963 Log2(("monitor: Guest intercept -> #VMEXIT\n"));
7964 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7965 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MONITOR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7966 }
7967
7968 /*
7969 * Call EM to prepare the monitor/wait.
7970 */
7971 rcStrict = EMMonitorWaitPrepare(pVCpu, pVCpu->cpum.GstCtx.rax, pVCpu->cpum.GstCtx.rcx, pVCpu->cpum.GstCtx.rdx, GCPhysMem);
7972 Assert(rcStrict == VINF_SUCCESS);
7973 if (rcStrict == VINF_SUCCESS)
7974 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7975 return rcStrict;
7976}
7977
7978
7979/**
7980 * Implements 'MWAIT'.
7981 */
7982IEM_CIMPL_DEF_0(iemCImpl_mwait)
7983{
7984 /*
7985 * Permission checks.
7986 */
7987 if (IEM_GET_CPL(pVCpu) != 0)
7988 {
7989 Log2(("mwait: CPL != 0\n"));
7990 /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. (Remember to check
7991 * EFLAGS.VM then.) */
7992 return iemRaiseUndefinedOpcode(pVCpu);
7993 }
7994 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait)
7995 {
7996 Log2(("mwait: Not in CPUID\n"));
7997 return iemRaiseUndefinedOpcode(pVCpu);
7998 }
7999
8000 /* Check VMX nested-guest intercept. */
8001 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8002 || !IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_MWAIT_EXIT))
8003 { /* probable */ }
8004 else
8005 IEM_VMX_VMEXIT_MWAIT_RET(pVCpu, EMMonitorIsArmed(pVCpu), cbInstr);
8006
8007 /*
8008 * Gather the operands and validate them.
8009 */
8010 uint32_t const uEax = pVCpu->cpum.GstCtx.eax;
8011 uint32_t const uEcx = pVCpu->cpum.GstCtx.ecx;
8012 if (uEcx != 0)
8013 {
8014 /* Only supported extension is break on IRQ when IF=0. */
8015 if (uEcx > 1)
8016 {
8017 Log2(("mwait eax=%RX32, ecx=%RX32; ECX > 1 -> #GP(0)\n", uEax, uEcx));
8018 return iemRaiseGeneralProtectionFault0(pVCpu);
8019 }
8020 uint32_t fMWaitFeatures = 0;
8021 uint32_t uIgnore = 0;
8022 CPUMGetGuestCpuId(pVCpu, 5, 0, -1 /*f64BitMode*/, &uIgnore, &uIgnore, &fMWaitFeatures, &uIgnore);
8023 if ( (fMWaitFeatures & (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
8024 != (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
8025 {
8026 Log2(("mwait eax=%RX32, ecx=%RX32; break-on-IRQ-IF=0 extension not enabled -> #GP(0)\n", uEax, uEcx));
8027 return iemRaiseGeneralProtectionFault0(pVCpu);
8028 }
8029
8030#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8031 /*
8032 * If the interrupt-window exiting control is set or a virtual-interrupt is pending
8033 * for delivery; and interrupts are disabled the processor does not enter its
8034 * mwait state but rather passes control to the next instruction.
8035 *
8036 * See Intel spec. 25.3 "Changes to Instruction Behavior In VMX Non-root Operation".
8037 */
8038 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8039 || pVCpu->cpum.GstCtx.eflags.Bits.u1IF)
8040 { /* probable */ }
8041 else if ( IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INT_WINDOW_EXIT)
8042 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
8043 /** @todo finish: check up this out after we move int window stuff out of the
8044 * run loop and into the instruction finishing logic here. */
8045 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8046#endif
8047 }
8048
8049 /*
8050 * Check SVM nested-guest mwait intercepts.
8051 */
8052 if (!IEM_IS_IN_GUEST(pVCpu))
8053 { /* probable */ }
8054 else if ( IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT_ARMED)
8055 && EMMonitorIsArmed(pVCpu))
8056 {
8057 Log2(("mwait: Guest intercept (monitor hardware armed) -> #VMEXIT\n"));
8058 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
8059 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MWAIT_ARMED, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
8060 }
8061 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT))
8062 {
8063 Log2(("mwait: Guest intercept -> #VMEXIT\n"));
8064 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
8065 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MWAIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
8066 }
8067
8068 /*
8069 * Call EM to prepare the monitor/wait.
8070 *
8071 * This will return VINF_EM_HALT. If there the trap flag is set, we may
8072 * override it when executing iemRegAddToRipAndFinishingClearingRF ASSUMING
8073 * that will only return guest related events.
8074 */
8075 VBOXSTRICTRC rcStrict = EMMonitorWaitPerform(pVCpu, uEax, uEcx);
8076
8077 /** @todo finish: This needs more thinking as we should suppress internal
8078 * debugger events here, or we'll bugger up the guest state even more than we
8079 * alread do around VINF_EM_HALT. */
8080 VBOXSTRICTRC rcStrict2 = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8081 if (rcStrict2 != VINF_SUCCESS)
8082 {
8083 Log2(("mwait: %Rrc (perform) -> %Rrc (finish)!\n", VBOXSTRICTRC_VAL(rcStrict), VBOXSTRICTRC_VAL(rcStrict2) ));
8084 rcStrict = rcStrict2;
8085 }
8086
8087 return rcStrict;
8088}
8089
8090
8091/**
8092 * Implements 'SWAPGS'.
8093 */
8094IEM_CIMPL_DEF_0(iemCImpl_swapgs)
8095{
8096 Assert(IEM_IS_64BIT_CODE(pVCpu)); /* Caller checks this. */
8097
8098 /*
8099 * Permission checks.
8100 */
8101 if (IEM_GET_CPL(pVCpu) != 0)
8102 {
8103 Log2(("swapgs: CPL != 0\n"));
8104 return iemRaiseUndefinedOpcode(pVCpu);
8105 }
8106
8107 /*
8108 * Do the job.
8109 */
8110 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_GS);
8111 uint64_t uOtherGsBase = pVCpu->cpum.GstCtx.msrKERNELGSBASE;
8112 pVCpu->cpum.GstCtx.msrKERNELGSBASE = pVCpu->cpum.GstCtx.gs.u64Base;
8113 pVCpu->cpum.GstCtx.gs.u64Base = uOtherGsBase;
8114
8115 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8116}
8117
8118
8119#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
8120/**
8121 * Handles a CPUID call.
8122 */
8123static VBOXSTRICTRC iemCpuIdVBoxCall(PVMCPUCC pVCpu, uint32_t iFunction,
8124 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
8125{
8126 switch (iFunction)
8127 {
8128 case VBOX_CPUID_FN_ID:
8129 LogFlow(("iemCpuIdVBoxCall: VBOX_CPUID_FN_ID\n"));
8130 *pEax = VBOX_CPUID_RESP_ID_EAX;
8131 *pEbx = VBOX_CPUID_RESP_ID_EBX;
8132 *pEcx = VBOX_CPUID_RESP_ID_ECX;
8133 *pEdx = VBOX_CPUID_RESP_ID_EDX;
8134 break;
8135
8136 case VBOX_CPUID_FN_LOG:
8137 {
8138 CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX | CPUMCTX_EXTRN_RSI
8139 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8140
8141 /* Validate input. */
8142 uint32_t cchToLog = *pEdx;
8143 if (cchToLog <= _2M)
8144 {
8145 uint32_t const uLogPicker = *pEbx;
8146 if (uLogPicker <= 1)
8147 {
8148 /* Resolve the logger. */
8149 PRTLOGGER const pLogger = !uLogPicker
8150 ? RTLogDefaultInstanceEx(UINT32_MAX) : RTLogRelGetDefaultInstanceEx(UINT32_MAX);
8151 if (pLogger)
8152 {
8153 /* Copy over the data: */
8154 RTGCPTR GCPtrSrc = pVCpu->cpum.GstCtx.rsi;
8155 while (cchToLog > 0)
8156 {
8157 uint32_t cbToMap = GUEST_PAGE_SIZE - (GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
8158 if (cbToMap > cchToLog)
8159 cbToMap = cchToLog;
8160 /** @todo Extend iemMemMap to allowing page size accessing and avoid 7
8161 * unnecessary calls & iterations per pages. */
8162 if (cbToMap > 512)
8163 cbToMap = 512;
8164 void *pvSrc = NULL;
8165 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvSrc, cbToMap, UINT8_MAX, GCPtrSrc, IEM_ACCESS_DATA_R, 0);
8166 if (rcStrict == VINF_SUCCESS)
8167 {
8168 RTLogBulkNestedWrite(pLogger, (const char *)pvSrc, cbToMap, "Gst:");
8169 rcStrict = iemMemCommitAndUnmap(pVCpu, pvSrc, IEM_ACCESS_DATA_R);
8170 AssertRCSuccessReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
8171 }
8172 else
8173 {
8174 Log(("iemCpuIdVBoxCall: %Rrc at %RGp LB %#x\n", VBOXSTRICTRC_VAL(rcStrict), GCPtrSrc, cbToMap));
8175 return rcStrict;
8176 }
8177
8178 /* Advance. */
8179 pVCpu->cpum.GstCtx.rsi = GCPtrSrc += cbToMap;
8180 *pEdx = cchToLog -= cbToMap;
8181 }
8182 *pEax = VINF_SUCCESS;
8183 }
8184 else
8185 *pEax = (uint32_t)VERR_NOT_FOUND;
8186 }
8187 else
8188 *pEax = (uint32_t)VERR_NOT_FOUND;
8189 }
8190 else
8191 *pEax = (uint32_t)VERR_TOO_MUCH_DATA;
8192 *pEdx = VBOX_CPUID_RESP_GEN_EDX;
8193 *pEcx = VBOX_CPUID_RESP_GEN_ECX;
8194 *pEbx = VBOX_CPUID_RESP_GEN_EBX;
8195 break;
8196 }
8197
8198 default:
8199 LogFlow(("iemCpuIdVBoxCall: Invalid function %#x (%#x, %#x)\n", iFunction, *pEbx, *pEdx));
8200 *pEax = (uint32_t)VERR_INVALID_FUNCTION;
8201 *pEbx = (uint32_t)VERR_INVALID_FUNCTION;
8202 *pEcx = (uint32_t)VERR_INVALID_FUNCTION;
8203 *pEdx = (uint32_t)VERR_INVALID_FUNCTION;
8204 break;
8205 }
8206 return VINF_SUCCESS;
8207}
8208#endif /* VBOX_WITHOUT_CPUID_HOST_CALL */
8209
8210/**
8211 * Implements 'CPUID'.
8212 */
8213IEM_CIMPL_DEF_0(iemCImpl_cpuid)
8214{
8215 if (!IEM_IS_IN_GUEST(pVCpu))
8216 { /* probable */ }
8217 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8218 {
8219 Log2(("cpuid: Guest intercept -> VM-exit\n"));
8220 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_CPUID, cbInstr);
8221 }
8222 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CPUID))
8223 {
8224 Log2(("cpuid: Guest intercept -> #VMEXIT\n"));
8225 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
8226 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_CPUID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
8227 }
8228
8229
8230 uint32_t const uEax = pVCpu->cpum.GstCtx.eax;
8231 uint32_t const uEcx = pVCpu->cpum.GstCtx.ecx;
8232
8233#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
8234 /*
8235 * CPUID host call backdoor.
8236 */
8237 if ( uEax == VBOX_CPUID_REQ_EAX_FIXED
8238 && (uEcx & VBOX_CPUID_REQ_ECX_FIXED_MASK) == VBOX_CPUID_REQ_ECX_FIXED
8239 && pVCpu->CTX_SUFF(pVM)->iem.s.fCpuIdHostCall)
8240 {
8241 VBOXSTRICTRC rcStrict = iemCpuIdVBoxCall(pVCpu, uEcx & VBOX_CPUID_REQ_ECX_FN_MASK,
8242 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx,
8243 &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
8244 if (rcStrict != VINF_SUCCESS)
8245 return rcStrict;
8246 }
8247 /*
8248 * Regular CPUID.
8249 */
8250 else
8251#endif
8252 CPUMGetGuestCpuId(pVCpu, uEax, uEcx, pVCpu->cpum.GstCtx.cs.Attr.n.u1Long,
8253 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
8254
8255 pVCpu->cpum.GstCtx.rax &= UINT32_C(0xffffffff);
8256 pVCpu->cpum.GstCtx.rbx &= UINT32_C(0xffffffff);
8257 pVCpu->cpum.GstCtx.rcx &= UINT32_C(0xffffffff);
8258 pVCpu->cpum.GstCtx.rdx &= UINT32_C(0xffffffff);
8259 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
8260
8261 pVCpu->iem.s.cPotentialExits++;
8262 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8263}
8264
8265
8266/**
8267 * Implements 'AAD'.
8268 *
8269 * @param bImm The immediate operand.
8270 */
8271IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
8272{
8273 uint16_t const ax = pVCpu->cpum.GstCtx.ax;
8274 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
8275 pVCpu->cpum.GstCtx.ax = al;
8276 iemHlpUpdateArithEFlagsU8(pVCpu, al,
8277 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
8278 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
8279
8280 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8281}
8282
8283
8284/**
8285 * Implements 'AAM'.
8286 *
8287 * @param bImm The immediate operand. Cannot be 0.
8288 */
8289IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
8290{
8291 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
8292
8293 uint16_t const ax = pVCpu->cpum.GstCtx.ax;
8294 uint8_t const al = (uint8_t)ax % bImm;
8295 uint8_t const ah = (uint8_t)ax / bImm;
8296 pVCpu->cpum.GstCtx.ax = (ah << 8) + al;
8297 iemHlpUpdateArithEFlagsU8(pVCpu, al,
8298 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
8299 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
8300
8301 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8302}
8303
8304
8305/**
8306 * Implements 'DAA'.
8307 */
8308IEM_CIMPL_DEF_0(iemCImpl_daa)
8309{
8310 uint8_t const al = pVCpu->cpum.GstCtx.al;
8311 bool const fCarry = pVCpu->cpum.GstCtx.eflags.Bits.u1CF;
8312
8313 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8314 || (al & 0xf) >= 10)
8315 {
8316 pVCpu->cpum.GstCtx.al = al + 6;
8317 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8318 }
8319 else
8320 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8321
8322 if (al >= 0x9a || fCarry)
8323 {
8324 pVCpu->cpum.GstCtx.al += 0x60;
8325 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8326 }
8327 else
8328 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8329
8330 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8331 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8332}
8333
8334
8335/**
8336 * Implements 'DAS'.
8337 */
8338IEM_CIMPL_DEF_0(iemCImpl_das)
8339{
8340 uint8_t const uInputAL = pVCpu->cpum.GstCtx.al;
8341 bool const fCarry = pVCpu->cpum.GstCtx.eflags.Bits.u1CF;
8342
8343 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8344 || (uInputAL & 0xf) >= 10)
8345 {
8346 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8347 if (uInputAL < 6)
8348 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8349 pVCpu->cpum.GstCtx.al = uInputAL - 6;
8350 }
8351 else
8352 {
8353 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8354 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8355 }
8356
8357 if (uInputAL >= 0x9a || fCarry)
8358 {
8359 pVCpu->cpum.GstCtx.al -= 0x60;
8360 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8361 }
8362
8363 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8364 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8365}
8366
8367
8368/**
8369 * Implements 'AAA'.
8370 */
8371IEM_CIMPL_DEF_0(iemCImpl_aaa)
8372{
8373 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
8374 {
8375 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8376 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
8377 {
8378 iemAImpl_add_u16(&pVCpu->cpum.GstCtx.ax, 0x106, &pVCpu->cpum.GstCtx.eflags.uBoth);
8379 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8380 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8381 }
8382 else
8383 {
8384 iemHlpUpdateArithEFlagsU16(pVCpu, pVCpu->cpum.GstCtx.ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8385 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8386 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8387 }
8388 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
8389 }
8390 else
8391 {
8392 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8393 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
8394 {
8395 pVCpu->cpum.GstCtx.ax += UINT16_C(0x106);
8396 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8397 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8398 }
8399 else
8400 {
8401 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8402 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8403 }
8404 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
8405 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8406 }
8407
8408 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8409}
8410
8411
8412/**
8413 * Implements 'AAS'.
8414 */
8415IEM_CIMPL_DEF_0(iemCImpl_aas)
8416{
8417 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
8418 {
8419 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8420 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
8421 {
8422 iemAImpl_sub_u16(&pVCpu->cpum.GstCtx.ax, 0x106, &pVCpu->cpum.GstCtx.eflags.uBoth);
8423 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8424 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8425 }
8426 else
8427 {
8428 iemHlpUpdateArithEFlagsU16(pVCpu, pVCpu->cpum.GstCtx.ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8429 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8430 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8431 }
8432 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
8433 }
8434 else
8435 {
8436 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8437 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
8438 {
8439 pVCpu->cpum.GstCtx.ax -= UINT16_C(0x106);
8440 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8441 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8442 }
8443 else
8444 {
8445 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8446 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8447 }
8448 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
8449 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8450 }
8451
8452 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8453}
8454
8455
8456/**
8457 * Implements the 16-bit version of 'BOUND'.
8458 *
8459 * @note We have separate 16-bit and 32-bit variants of this function due to
8460 * the decoder using unsigned parameters, whereas we want signed one to
8461 * do the job. This is significant for a recompiler.
8462 */
8463IEM_CIMPL_DEF_3(iemCImpl_bound_16, int16_t, idxArray, int16_t, idxLowerBound, int16_t, idxUpperBound)
8464{
8465 /*
8466 * Check if the index is inside the bounds, otherwise raise #BR.
8467 */
8468 if ( idxArray >= idxLowerBound
8469 && idxArray <= idxUpperBound)
8470 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8471 return iemRaiseBoundRangeExceeded(pVCpu);
8472}
8473
8474
8475/**
8476 * Implements the 32-bit version of 'BOUND'.
8477 */
8478IEM_CIMPL_DEF_3(iemCImpl_bound_32, int32_t, idxArray, int32_t, idxLowerBound, int32_t, idxUpperBound)
8479{
8480 /*
8481 * Check if the index is inside the bounds, otherwise raise #BR.
8482 */
8483 if ( idxArray >= idxLowerBound
8484 && idxArray <= idxUpperBound)
8485 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8486 return iemRaiseBoundRangeExceeded(pVCpu);
8487}
8488
8489
8490
8491/*
8492 * Instantiate the various string operation combinations.
8493 */
8494#define OP_SIZE 8
8495#define ADDR_SIZE 16
8496#include "IEMAllCImplStrInstr.cpp.h"
8497#define OP_SIZE 8
8498#define ADDR_SIZE 32
8499#include "IEMAllCImplStrInstr.cpp.h"
8500#define OP_SIZE 8
8501#define ADDR_SIZE 64
8502#include "IEMAllCImplStrInstr.cpp.h"
8503
8504#define OP_SIZE 16
8505#define ADDR_SIZE 16
8506#include "IEMAllCImplStrInstr.cpp.h"
8507#define OP_SIZE 16
8508#define ADDR_SIZE 32
8509#include "IEMAllCImplStrInstr.cpp.h"
8510#define OP_SIZE 16
8511#define ADDR_SIZE 64
8512#include "IEMAllCImplStrInstr.cpp.h"
8513
8514#define OP_SIZE 32
8515#define ADDR_SIZE 16
8516#include "IEMAllCImplStrInstr.cpp.h"
8517#define OP_SIZE 32
8518#define ADDR_SIZE 32
8519#include "IEMAllCImplStrInstr.cpp.h"
8520#define OP_SIZE 32
8521#define ADDR_SIZE 64
8522#include "IEMAllCImplStrInstr.cpp.h"
8523
8524#define OP_SIZE 64
8525#define ADDR_SIZE 32
8526#include "IEMAllCImplStrInstr.cpp.h"
8527#define OP_SIZE 64
8528#define ADDR_SIZE 64
8529#include "IEMAllCImplStrInstr.cpp.h"
8530
8531
8532/**
8533 * Implements 'XGETBV'.
8534 */
8535IEM_CIMPL_DEF_0(iemCImpl_xgetbv)
8536{
8537 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
8538 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)
8539 {
8540 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx;
8541 switch (uEcx)
8542 {
8543 case 0:
8544 break;
8545
8546 case 1: /** @todo Implement XCR1 support. */
8547 default:
8548 Log(("xgetbv ecx=%RX32 -> #GP(0)\n", uEcx));
8549 return iemRaiseGeneralProtectionFault0(pVCpu);
8550
8551 }
8552 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_XCRx);
8553 pVCpu->cpum.GstCtx.rax = RT_LO_U32(pVCpu->cpum.GstCtx.aXcr[uEcx]);
8554 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(pVCpu->cpum.GstCtx.aXcr[uEcx]);
8555
8556 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8557 }
8558 Log(("xgetbv CR4.OSXSAVE=0 -> UD\n"));
8559 return iemRaiseUndefinedOpcode(pVCpu);
8560}
8561
8562
8563/**
8564 * Implements 'XSETBV'.
8565 */
8566IEM_CIMPL_DEF_0(iemCImpl_xsetbv)
8567{
8568 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)
8569 {
8570 /** @todo explain why this happens before the CPL check. */
8571 if (!IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_XSETBV))
8572 { /* probable */ }
8573 else
8574 {
8575 Log2(("xsetbv: Guest intercept -> #VMEXIT\n"));
8576 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
8577 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XSETBV, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
8578 }
8579
8580 if (IEM_GET_CPL(pVCpu) == 0)
8581 {
8582 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_XCRx);
8583
8584 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8585 { /* probable */ }
8586 else
8587 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_XSETBV, cbInstr);
8588
8589 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx;
8590 uint64_t uNewValue = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx);
8591 switch (uEcx)
8592 {
8593 case 0:
8594 {
8595 int rc = CPUMSetGuestXcr0(pVCpu, uNewValue);
8596 if (rc == VINF_SUCCESS)
8597 break;
8598 Assert(rc == VERR_CPUM_RAISE_GP_0);
8599 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
8600 return iemRaiseGeneralProtectionFault0(pVCpu);
8601 }
8602
8603 case 1: /** @todo Implement XCR1 support. */
8604 default:
8605 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
8606 return iemRaiseGeneralProtectionFault0(pVCpu);
8607
8608 }
8609
8610 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8611 }
8612
8613 Log(("xsetbv cpl=%u -> GP(0)\n", IEM_GET_CPL(pVCpu)));
8614 return iemRaiseGeneralProtectionFault0(pVCpu);
8615 }
8616 Log(("xsetbv CR4.OSXSAVE=0 -> UD\n"));
8617 return iemRaiseUndefinedOpcode(pVCpu);
8618}
8619
8620#ifndef RT_ARCH_ARM64
8621# ifdef IN_RING3
8622
8623/** Argument package for iemCImpl_cmpxchg16b_fallback_rendezvous_callback. */
8624struct IEMCIMPLCX16ARGS
8625{
8626 PRTUINT128U pu128Dst;
8627 PRTUINT128U pu128RaxRdx;
8628 PRTUINT128U pu128RbxRcx;
8629 uint32_t *pEFlags;
8630# ifdef VBOX_STRICT
8631 uint32_t cCalls;
8632# endif
8633};
8634
8635/**
8636 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
8637 * Worker for iemCImpl_cmpxchg16b_fallback_rendezvous}
8638 */
8639static DECLCALLBACK(VBOXSTRICTRC) iemCImpl_cmpxchg16b_fallback_rendezvous_callback(PVM pVM, PVMCPUCC pVCpu, void *pvUser)
8640{
8641 RT_NOREF(pVM, pVCpu);
8642 struct IEMCIMPLCX16ARGS *pArgs = (struct IEMCIMPLCX16ARGS *)pvUser;
8643# ifdef VBOX_STRICT
8644 Assert(pArgs->cCalls == 0);
8645 pArgs->cCalls++;
8646# endif
8647
8648 iemAImpl_cmpxchg16b_fallback(pArgs->pu128Dst, pArgs->pu128RaxRdx, pArgs->pu128RbxRcx, pArgs->pEFlags);
8649 return VINF_SUCCESS;
8650}
8651
8652# endif /* IN_RING3 */
8653
8654/**
8655 * Implements 'CMPXCHG16B' fallback using rendezvous.
8656 */
8657IEM_CIMPL_DEF_4(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,
8658 PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags)
8659{
8660# ifdef IN_RING3
8661 struct IEMCIMPLCX16ARGS Args;
8662 Args.pu128Dst = pu128Dst;
8663 Args.pu128RaxRdx = pu128RaxRdx;
8664 Args.pu128RbxRcx = pu128RbxRcx;
8665 Args.pEFlags = pEFlags;
8666# ifdef VBOX_STRICT
8667 Args.cCalls = 0;
8668# endif
8669 VBOXSTRICTRC rcStrict = VMMR3EmtRendezvous(pVCpu->CTX_SUFF(pVM), VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
8670 iemCImpl_cmpxchg16b_fallback_rendezvous_callback, &Args);
8671 Assert(Args.cCalls == 1);
8672 if (rcStrict == VINF_SUCCESS)
8673 {
8674 /* Duplicated tail code. */
8675 rcStrict = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_RW);
8676 if (rcStrict == VINF_SUCCESS)
8677 {
8678 pVCpu->cpum.GstCtx.eflags.u = *pEFlags; /* IEM_MC_COMMIT_EFLAGS */
8679 if (!(*pEFlags & X86_EFL_ZF))
8680 {
8681 pVCpu->cpum.GstCtx.rax = pu128RaxRdx->s.Lo;
8682 pVCpu->cpum.GstCtx.rdx = pu128RaxRdx->s.Hi;
8683 }
8684 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8685 }
8686 }
8687 return rcStrict;
8688# else
8689 RT_NOREF(pVCpu, cbInstr, pu128Dst, pu128RaxRdx, pu128RbxRcx, pEFlags);
8690 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; /* This should get us to ring-3 for now. Should perhaps be replaced later. */
8691# endif
8692}
8693
8694#endif /* RT_ARCH_ARM64 */
8695
8696/**
8697 * Implements 'CLFLUSH' and 'CLFLUSHOPT'.
8698 *
8699 * This is implemented in C because it triggers a load like behaviour without
8700 * actually reading anything. Since that's not so common, it's implemented
8701 * here.
8702 *
8703 * @param iEffSeg The effective segment.
8704 * @param GCPtrEff The address of the image.
8705 */
8706IEM_CIMPL_DEF_2(iemCImpl_clflush_clflushopt, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
8707{
8708 /*
8709 * Pretend to do a load w/o reading (see also iemCImpl_monitor and iemMemMap).
8710 */
8711 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrEff);
8712 if (rcStrict == VINF_SUCCESS)
8713 {
8714 RTGCPHYS GCPhysMem;
8715 /** @todo access size. */
8716 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrEff, 1, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
8717 if (rcStrict == VINF_SUCCESS)
8718 {
8719#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8720 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8721 || !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
8722 { /* probable */ }
8723 else
8724 {
8725 /*
8726 * CLFLUSH/CLFLUSHOPT does not access the memory, but flushes the cache-line
8727 * that contains the address. However, if the address falls in the APIC-access
8728 * page, the address flushed must instead be the corresponding address in the
8729 * virtual-APIC page.
8730 *
8731 * See Intel spec. 29.4.4 "Instruction-Specific Considerations".
8732 */
8733 rcStrict = iemVmxVirtApicAccessUnused(pVCpu, &GCPhysMem, 1, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA);
8734 if ( rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE
8735 && rcStrict != VINF_VMX_MODIFIES_BEHAVIOR)
8736 return rcStrict;
8737 }
8738#endif
8739 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8740 }
8741 }
8742
8743 return rcStrict;
8744}
8745
8746
8747/**
8748 * Implements 'FINIT' and 'FNINIT'.
8749 *
8750 * @param fCheckXcpts Whether to check for umasked pending exceptions or
8751 * not.
8752 */
8753IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
8754{
8755 /*
8756 * Exceptions.
8757 */
8758 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8759 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS))
8760 return iemRaiseDeviceNotAvailable(pVCpu);
8761
8762 iemFpuActualizeStateForChange(pVCpu);
8763 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_X87);
8764
8765 /* FINIT: Raise #MF on pending exception(s): */
8766 if (fCheckXcpts && (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES))
8767 return iemRaiseMathFault(pVCpu);
8768
8769 /*
8770 * Reset the state.
8771 */
8772 PX86XSAVEAREA pXState = &pVCpu->cpum.GstCtx.XState;
8773
8774 /* Rotate the stack to account for changed TOS. */
8775 iemFpuRotateStackSetTop(&pXState->x87, 0);
8776
8777 pXState->x87.FCW = 0x37f;
8778 pXState->x87.FSW = 0;
8779 pXState->x87.FTW = 0x00; /* 0 - empty. */
8780 /** @todo Intel says the instruction and data pointers are not cleared on
8781 * 387, presume that 8087 and 287 doesn't do so either. */
8782 /** @todo test this stuff. */
8783 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
8784 {
8785 pXState->x87.FPUDP = 0;
8786 pXState->x87.DS = 0; //??
8787 pXState->x87.Rsrvd2 = 0;
8788 pXState->x87.FPUIP = 0;
8789 pXState->x87.CS = 0; //??
8790 pXState->x87.Rsrvd1 = 0;
8791 }
8792 pXState->x87.FOP = 0;
8793
8794 iemHlpUsedFpu(pVCpu);
8795 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8796}
8797
8798
8799/**
8800 * Implements 'FXSAVE'.
8801 *
8802 * @param iEffSeg The effective segment.
8803 * @param GCPtrEff The address of the image.
8804 * @param enmEffOpSize The operand size (only REX.W really matters).
8805 */
8806IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8807{
8808 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
8809
8810 /** @todo check out bugref{1529} and AMD behaviour */
8811
8812 /*
8813 * Raise exceptions.
8814 */
8815 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_TS | X86_CR0_EM))
8816 return iemRaiseDeviceNotAvailable(pVCpu);
8817
8818 /*
8819 * Access the memory.
8820 */
8821 void *pvMem512;
8822 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
8823 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
8824 if (rcStrict != VINF_SUCCESS)
8825 return rcStrict;
8826 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
8827 PCX86FXSTATE pSrc = &pVCpu->cpum.GstCtx.XState.x87;
8828
8829 /*
8830 * Store the registers.
8831 */
8832 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 MXCSR it's
8833 * implementation specific whether MXCSR and XMM0-XMM7 are saved. */
8834
8835 /* common for all formats */
8836 pDst->FCW = pSrc->FCW;
8837 pDst->FSW = pSrc->FSW;
8838 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8839 pDst->FOP = pSrc->FOP;
8840 pDst->MXCSR = pSrc->MXCSR;
8841 pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
8842 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
8843 {
8844 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
8845 * them for now... */
8846 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8847 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8848 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8849 pDst->aRegs[i].au32[3] = 0;
8850 }
8851
8852 /* FPU IP, CS, DP and DS. */
8853 pDst->FPUIP = pSrc->FPUIP;
8854 pDst->CS = pSrc->CS;
8855 pDst->FPUDP = pSrc->FPUDP;
8856 pDst->DS = pSrc->DS;
8857 if (enmEffOpSize == IEMMODE_64BIT)
8858 {
8859 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
8860 pDst->Rsrvd1 = pSrc->Rsrvd1;
8861 pDst->Rsrvd2 = pSrc->Rsrvd2;
8862 }
8863 else
8864 {
8865 pDst->Rsrvd1 = 0;
8866 pDst->Rsrvd2 = 0;
8867 }
8868
8869 /* XMM registers. Skipped in 64-bit CPL0 if EFER.FFXSR (AMD only) is set. */
8870 if ( !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_FFXSR)
8871 || !IEM_IS_64BIT_CODE(pVCpu)
8872 || IEM_GET_CPL(pVCpu) != 0)
8873 {
8874 uint32_t cXmmRegs = IEM_IS_64BIT_CODE(pVCpu) ? 16 : 8;
8875 for (uint32_t i = 0; i < cXmmRegs; i++)
8876 pDst->aXMM[i] = pSrc->aXMM[i];
8877 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
8878 * right? */
8879 }
8880
8881 /*
8882 * Commit the memory.
8883 */
8884 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8885 if (rcStrict != VINF_SUCCESS)
8886 return rcStrict;
8887
8888 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8889}
8890
8891
8892/**
8893 * Implements 'FXRSTOR'.
8894 *
8895 * @param iEffSeg The effective segment register for @a GCPtrEff.
8896 * @param GCPtrEff The address of the image.
8897 * @param enmEffOpSize The operand size (only REX.W really matters).
8898 */
8899IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8900{
8901 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
8902
8903 /** @todo check out bugref{1529} and AMD behaviour */
8904
8905 /*
8906 * Raise exceptions.
8907 */
8908 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_TS | X86_CR0_EM))
8909 return iemRaiseDeviceNotAvailable(pVCpu);
8910
8911 /*
8912 * Access the memory.
8913 */
8914 void *pvMem512;
8915 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R,
8916 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
8917 if (rcStrict != VINF_SUCCESS)
8918 return rcStrict;
8919 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
8920 PX86FXSTATE pDst = &pVCpu->cpum.GstCtx.XState.x87;
8921
8922 /*
8923 * Check the state for stuff which will #GP(0).
8924 */
8925 uint32_t const fMXCSR = pSrc->MXCSR;
8926 uint32_t const fMXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
8927 if (fMXCSR & ~fMXCSR_MASK)
8928 {
8929 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
8930 return iemRaiseGeneralProtectionFault0(pVCpu);
8931 }
8932
8933 /*
8934 * Load the registers.
8935 */
8936 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 MXCSR it's
8937 * implementation specific whether MXCSR and XMM0-XMM7 are
8938 * restored according to Intel.
8939 * AMD says MXCSR and XMM registers are never loaded if
8940 * CR4.OSFXSR=0.
8941 */
8942
8943 /* common for all formats */
8944 pDst->FCW = pSrc->FCW;
8945 pDst->FSW = pSrc->FSW;
8946 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8947 pDst->FOP = pSrc->FOP;
8948 pDst->MXCSR = fMXCSR;
8949 /* (MXCSR_MASK is read-only) */
8950 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
8951 {
8952 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8953 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8954 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8955 pDst->aRegs[i].au32[3] = 0;
8956 }
8957
8958 /* FPU IP, CS, DP and DS. */
8959 /** @todo AMD says this is only done if FSW.ES is set after loading. */
8960 if (enmEffOpSize == IEMMODE_64BIT)
8961 {
8962 pDst->FPUIP = pSrc->FPUIP;
8963 pDst->CS = pSrc->CS;
8964 pDst->Rsrvd1 = pSrc->Rsrvd1;
8965 pDst->FPUDP = pSrc->FPUDP;
8966 pDst->DS = pSrc->DS;
8967 pDst->Rsrvd2 = pSrc->Rsrvd2;
8968 }
8969 else
8970 {
8971 pDst->FPUIP = pSrc->FPUIP;
8972 pDst->CS = pSrc->CS;
8973 pDst->Rsrvd1 = 0;
8974 pDst->FPUDP = pSrc->FPUDP;
8975 pDst->DS = pSrc->DS;
8976 pDst->Rsrvd2 = 0;
8977 }
8978
8979 /* XMM registers. Skipped in 64-bit CPL0 if EFER.FFXSR (AMD only) is set.
8980 * Does not affect MXCSR, only registers.
8981 */
8982 if ( !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_FFXSR)
8983 || !IEM_IS_64BIT_CODE(pVCpu)
8984 || IEM_GET_CPL(pVCpu) != 0)
8985 {
8986 uint32_t cXmmRegs = IEM_IS_64BIT_CODE(pVCpu) ? 16 : 8;
8987 for (uint32_t i = 0; i < cXmmRegs; i++)
8988 pDst->aXMM[i] = pSrc->aXMM[i];
8989 }
8990
8991 pDst->FCW &= ~X86_FCW_ZERO_MASK | X86_FCW_IC_MASK; /* Intel 10980xe allows setting the IC bit. Win 3.11 CALC.EXE sets it. */
8992 iemFpuRecalcExceptionStatus(pDst);
8993
8994 if (pDst->FSW & X86_FSW_ES)
8995 Log11(("fxrstor: %04x:%08RX64: loading state with pending FPU exception (FSW=%#x)\n",
8996 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pSrc->FSW));
8997
8998 /*
8999 * Unmap the memory.
9000 */
9001 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);
9002 if (rcStrict != VINF_SUCCESS)
9003 return rcStrict;
9004
9005 iemHlpUsedFpu(pVCpu);
9006 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9007}
9008
9009
9010/**
9011 * Implements 'XSAVE'.
9012 *
9013 * @param iEffSeg The effective segment.
9014 * @param GCPtrEff The address of the image.
9015 * @param enmEffOpSize The operand size (only REX.W really matters).
9016 */
9017IEM_CIMPL_DEF_3(iemCImpl_xsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
9018{
9019 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
9020
9021 /*
9022 * Raise exceptions.
9023 */
9024 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
9025 return iemRaiseUndefinedOpcode(pVCpu);
9026 /* When in VMX non-root mode and XSAVE/XRSTOR is not enabled, it results in #UD. */
9027 if (RT_LIKELY( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
9028 || IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_XSAVES_XRSTORS)))
9029 { /* likely */ }
9030 else
9031 {
9032 Log(("xrstor: Not enabled for nested-guest execution -> #UD\n"));
9033 return iemRaiseUndefinedOpcode(pVCpu);
9034 }
9035 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)
9036 return iemRaiseDeviceNotAvailable(pVCpu);
9037
9038 /*
9039 * Calc the requested mask.
9040 */
9041 uint64_t const fReqComponents = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx) & pVCpu->cpum.GstCtx.aXcr[0];
9042 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
9043 uint64_t const fXInUse = pVCpu->cpum.GstCtx.aXcr[0];
9044
9045/** @todo figure out the exact protocol for the memory access. Currently we
9046 * just need this crap to work halfways to make it possible to test
9047 * AVX instructions. */
9048/** @todo figure out the XINUSE and XMODIFIED */
9049
9050 /*
9051 * Access the x87 memory state.
9052 */
9053 /* The x87+SSE state. */
9054 void *pvMem512;
9055 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
9056 63 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
9057 if (rcStrict != VINF_SUCCESS)
9058 return rcStrict;
9059 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
9060 PCX86FXSTATE pSrc = &pVCpu->cpum.GstCtx.XState.x87;
9061
9062 /* The header. */
9063 PX86XSAVEHDR pHdr;
9064 rcStrict = iemMemMap(pVCpu, (void **)&pHdr, sizeof(&pHdr), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_RW, 0 /* checked above */);
9065 if (rcStrict != VINF_SUCCESS)
9066 return rcStrict;
9067
9068 /*
9069 * Store the X87 state.
9070 */
9071 if (fReqComponents & XSAVE_C_X87)
9072 {
9073 /* common for all formats */
9074 pDst->FCW = pSrc->FCW;
9075 pDst->FSW = pSrc->FSW;
9076 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
9077 pDst->FOP = pSrc->FOP;
9078 pDst->FPUIP = pSrc->FPUIP;
9079 pDst->CS = pSrc->CS;
9080 pDst->FPUDP = pSrc->FPUDP;
9081 pDst->DS = pSrc->DS;
9082 if (enmEffOpSize == IEMMODE_64BIT)
9083 {
9084 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
9085 pDst->Rsrvd1 = pSrc->Rsrvd1;
9086 pDst->Rsrvd2 = pSrc->Rsrvd2;
9087 }
9088 else
9089 {
9090 pDst->Rsrvd1 = 0;
9091 pDst->Rsrvd2 = 0;
9092 }
9093 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
9094 {
9095 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
9096 * them for now... */
9097 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
9098 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
9099 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
9100 pDst->aRegs[i].au32[3] = 0;
9101 }
9102
9103 }
9104
9105 if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
9106 {
9107 pDst->MXCSR = pSrc->MXCSR;
9108 pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
9109 }
9110
9111 if (fReqComponents & XSAVE_C_SSE)
9112 {
9113 /* XMM registers. */
9114 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
9115 for (uint32_t i = 0; i < cXmmRegs; i++)
9116 pDst->aXMM[i] = pSrc->aXMM[i];
9117 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
9118 * right? */
9119 }
9120
9121 /* Commit the x87 state bits. (probably wrong) */
9122 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
9123 if (rcStrict != VINF_SUCCESS)
9124 return rcStrict;
9125
9126 /*
9127 * Store AVX state.
9128 */
9129 if (fReqComponents & XSAVE_C_YMM)
9130 {
9131 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
9132 AssertLogRelReturn(pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
9133 PCX86XSAVEYMMHI pCompSrc = CPUMCTX_XSAVE_C_PTR(IEM_GET_CTX(pVCpu), XSAVE_C_YMM_BIT, PCX86XSAVEYMMHI);
9134 PX86XSAVEYMMHI pCompDst;
9135 rcStrict = iemMemMap(pVCpu, (void **)&pCompDst, sizeof(*pCompDst), iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT],
9136 IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 0 /* checked above */);
9137 if (rcStrict != VINF_SUCCESS)
9138 return rcStrict;
9139
9140 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
9141 for (uint32_t i = 0; i < cXmmRegs; i++)
9142 pCompDst->aYmmHi[i] = pCompSrc->aYmmHi[i];
9143
9144 rcStrict = iemMemCommitAndUnmap(pVCpu, pCompDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
9145 if (rcStrict != VINF_SUCCESS)
9146 return rcStrict;
9147 }
9148
9149 /*
9150 * Update the header.
9151 */
9152 pHdr->bmXState = (pHdr->bmXState & ~fReqComponents)
9153 | (fReqComponents & fXInUse);
9154
9155 rcStrict = iemMemCommitAndUnmap(pVCpu, pHdr, IEM_ACCESS_DATA_RW);
9156 if (rcStrict != VINF_SUCCESS)
9157 return rcStrict;
9158
9159 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9160}
9161
9162
9163/**
9164 * Implements 'XRSTOR'.
9165 *
9166 * @param iEffSeg The effective segment.
9167 * @param GCPtrEff The address of the image.
9168 * @param enmEffOpSize The operand size (only REX.W really matters).
9169 */
9170IEM_CIMPL_DEF_3(iemCImpl_xrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
9171{
9172 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
9173
9174 /*
9175 * Raise exceptions.
9176 */
9177 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
9178 return iemRaiseUndefinedOpcode(pVCpu);
9179 /* When in VMX non-root mode and XSAVE/XRSTOR is not enabled, it results in #UD. */
9180 if (RT_LIKELY( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
9181 || IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_XSAVES_XRSTORS)))
9182 { /* likely */ }
9183 else
9184 {
9185 Log(("xrstor: Not enabled for nested-guest execution -> #UD\n"));
9186 return iemRaiseUndefinedOpcode(pVCpu);
9187 }
9188 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)
9189 return iemRaiseDeviceNotAvailable(pVCpu);
9190 if (GCPtrEff & 63)
9191 {
9192 /** @todo CPU/VM detection possible! \#AC might not be signal for
9193 * all/any misalignment sizes, intel says its an implementation detail. */
9194 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
9195 && pVCpu->cpum.GstCtx.eflags.Bits.u1AC
9196 && IEM_GET_CPL(pVCpu) == 3)
9197 return iemRaiseAlignmentCheckException(pVCpu);
9198 return iemRaiseGeneralProtectionFault0(pVCpu);
9199 }
9200
9201/** @todo figure out the exact protocol for the memory access. Currently we
9202 * just need this crap to work halfways to make it possible to test
9203 * AVX instructions. */
9204/** @todo figure out the XINUSE and XMODIFIED */
9205
9206 /*
9207 * Access the x87 memory state.
9208 */
9209 /* The x87+SSE state. */
9210 void *pvMem512;
9211 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R,
9212 63 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
9213 if (rcStrict != VINF_SUCCESS)
9214 return rcStrict;
9215 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
9216 PX86FXSTATE pDst = &pVCpu->cpum.GstCtx.XState.x87;
9217
9218 /*
9219 * Calc the requested mask
9220 */
9221 PX86XSAVEHDR pHdrDst = &pVCpu->cpum.GstCtx.XState.Hdr;
9222 PCX86XSAVEHDR pHdrSrc;
9223 rcStrict = iemMemMap(pVCpu, (void **)&pHdrSrc, sizeof(&pHdrSrc), iEffSeg, GCPtrEff + 512,
9224 IEM_ACCESS_DATA_R, 0 /* checked above */);
9225 if (rcStrict != VINF_SUCCESS)
9226 return rcStrict;
9227
9228 uint64_t const fReqComponents = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx) & pVCpu->cpum.GstCtx.aXcr[0];
9229 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
9230 //uint64_t const fXInUse = pVCpu->cpum.GstCtx.aXcr[0];
9231 uint64_t const fRstorMask = pHdrSrc->bmXState;
9232 uint64_t const fCompMask = pHdrSrc->bmXComp;
9233
9234 AssertLogRelReturn(!(fCompMask & XSAVE_C_X), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
9235
9236 uint32_t const cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
9237
9238 /* We won't need this any longer. */
9239 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pHdrSrc, IEM_ACCESS_DATA_R);
9240 if (rcStrict != VINF_SUCCESS)
9241 return rcStrict;
9242
9243 /*
9244 * Load the X87 state.
9245 */
9246 if (fReqComponents & XSAVE_C_X87)
9247 {
9248 if (fRstorMask & XSAVE_C_X87)
9249 {
9250 pDst->FCW = pSrc->FCW;
9251 pDst->FSW = pSrc->FSW;
9252 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
9253 pDst->FOP = pSrc->FOP;
9254 pDst->FPUIP = pSrc->FPUIP;
9255 pDst->CS = pSrc->CS;
9256 pDst->FPUDP = pSrc->FPUDP;
9257 pDst->DS = pSrc->DS;
9258 if (enmEffOpSize == IEMMODE_64BIT)
9259 {
9260 /* Load upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
9261 pDst->Rsrvd1 = pSrc->Rsrvd1;
9262 pDst->Rsrvd2 = pSrc->Rsrvd2;
9263 }
9264 else
9265 {
9266 pDst->Rsrvd1 = 0;
9267 pDst->Rsrvd2 = 0;
9268 }
9269 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
9270 {
9271 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
9272 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
9273 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
9274 pDst->aRegs[i].au32[3] = 0;
9275 }
9276
9277 pDst->FCW &= ~X86_FCW_ZERO_MASK | X86_FCW_IC_MASK; /* Intel 10980xe allows setting the IC bit. Win 3.11 CALC.EXE sets it. */
9278 iemFpuRecalcExceptionStatus(pDst);
9279
9280 if (pDst->FSW & X86_FSW_ES)
9281 Log11(("xrstor: %04x:%08RX64: loading state with pending FPU exception (FSW=%#x)\n",
9282 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pSrc->FSW));
9283 }
9284 else
9285 {
9286 pDst->FCW = 0x37f;
9287 pDst->FSW = 0;
9288 pDst->FTW = 0x00; /* 0 - empty. */
9289 pDst->FPUDP = 0;
9290 pDst->DS = 0; //??
9291 pDst->Rsrvd2= 0;
9292 pDst->FPUIP = 0;
9293 pDst->CS = 0; //??
9294 pDst->Rsrvd1= 0;
9295 pDst->FOP = 0;
9296 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
9297 {
9298 pDst->aRegs[i].au32[0] = 0;
9299 pDst->aRegs[i].au32[1] = 0;
9300 pDst->aRegs[i].au32[2] = 0;
9301 pDst->aRegs[i].au32[3] = 0;
9302 }
9303 }
9304 pHdrDst->bmXState |= XSAVE_C_X87; /* playing safe for now */
9305 }
9306
9307 /* MXCSR */
9308 if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
9309 {
9310 if (fRstorMask & (XSAVE_C_SSE | XSAVE_C_YMM))
9311 pDst->MXCSR = pSrc->MXCSR;
9312 else
9313 pDst->MXCSR = 0x1f80;
9314 }
9315
9316 /* XMM registers. */
9317 if (fReqComponents & XSAVE_C_SSE)
9318 {
9319 if (fRstorMask & XSAVE_C_SSE)
9320 {
9321 for (uint32_t i = 0; i < cXmmRegs; i++)
9322 pDst->aXMM[i] = pSrc->aXMM[i];
9323 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
9324 * right? */
9325 }
9326 else
9327 {
9328 for (uint32_t i = 0; i < cXmmRegs; i++)
9329 {
9330 pDst->aXMM[i].au64[0] = 0;
9331 pDst->aXMM[i].au64[1] = 0;
9332 }
9333 }
9334 pHdrDst->bmXState |= XSAVE_C_SSE; /* playing safe for now */
9335 }
9336
9337 /* Unmap the x87 state bits (so we've don't run out of mapping). */
9338 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);
9339 if (rcStrict != VINF_SUCCESS)
9340 return rcStrict;
9341
9342 /*
9343 * Restore AVX state.
9344 */
9345 if (fReqComponents & XSAVE_C_YMM)
9346 {
9347 AssertLogRelReturn(pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
9348 PX86XSAVEYMMHI pCompDst = CPUMCTX_XSAVE_C_PTR(IEM_GET_CTX(pVCpu), XSAVE_C_YMM_BIT, PX86XSAVEYMMHI);
9349
9350 if (fRstorMask & XSAVE_C_YMM)
9351 {
9352 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
9353 PCX86XSAVEYMMHI pCompSrc;
9354 rcStrict = iemMemMap(pVCpu, (void **)&pCompSrc, sizeof(*pCompDst),
9355 iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT],
9356 IEM_ACCESS_DATA_R, 0 /* checked above */);
9357 if (rcStrict != VINF_SUCCESS)
9358 return rcStrict;
9359
9360 for (uint32_t i = 0; i < cXmmRegs; i++)
9361 {
9362 pCompDst->aYmmHi[i].au64[0] = pCompSrc->aYmmHi[i].au64[0];
9363 pCompDst->aYmmHi[i].au64[1] = pCompSrc->aYmmHi[i].au64[1];
9364 }
9365
9366 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pCompSrc, IEM_ACCESS_DATA_R);
9367 if (rcStrict != VINF_SUCCESS)
9368 return rcStrict;
9369 }
9370 else
9371 {
9372 for (uint32_t i = 0; i < cXmmRegs; i++)
9373 {
9374 pCompDst->aYmmHi[i].au64[0] = 0;
9375 pCompDst->aYmmHi[i].au64[1] = 0;
9376 }
9377 }
9378 pHdrDst->bmXState |= XSAVE_C_YMM; /* playing safe for now */
9379 }
9380
9381 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9382}
9383
9384
9385
9386
9387/**
9388 * Implements 'STMXCSR'.
9389 *
9390 * @param iEffSeg The effective segment register for @a GCPtrEff.
9391 * @param GCPtrEff The address of the image.
9392 */
9393IEM_CIMPL_DEF_2(iemCImpl_stmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
9394{
9395 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
9396
9397 /*
9398 * Raise exceptions.
9399 */
9400 if ( !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)
9401 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR))
9402 {
9403 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
9404 {
9405 /*
9406 * Do the job.
9407 */
9408 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pVCpu->cpum.GstCtx.XState.x87.MXCSR);
9409 if (rcStrict == VINF_SUCCESS)
9410 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9411 return rcStrict;
9412 }
9413 return iemRaiseDeviceNotAvailable(pVCpu);
9414 }
9415 return iemRaiseUndefinedOpcode(pVCpu);
9416}
9417
9418
9419/**
9420 * Implements 'VSTMXCSR'.
9421 *
9422 * @param iEffSeg The effective segment register for @a GCPtrEff.
9423 * @param GCPtrEff The address of the image.
9424 */
9425IEM_CIMPL_DEF_2(iemCImpl_vstmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
9426{
9427 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_XCRx);
9428
9429 /*
9430 * Raise exceptions.
9431 */
9432 if ( ( !IEM_IS_GUEST_CPU_AMD(pVCpu)
9433 ? (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_SSE | XSAVE_C_YMM)) == (XSAVE_C_SSE | XSAVE_C_YMM)
9434 : !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)) /* AMD Jaguar CPU (f0x16,m0,s1) behaviour */
9435 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
9436 {
9437 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
9438 {
9439 /*
9440 * Do the job.
9441 */
9442 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pVCpu->cpum.GstCtx.XState.x87.MXCSR);
9443 if (rcStrict == VINF_SUCCESS)
9444 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9445 return rcStrict;
9446 }
9447 return iemRaiseDeviceNotAvailable(pVCpu);
9448 }
9449 return iemRaiseUndefinedOpcode(pVCpu);
9450}
9451
9452
9453/**
9454 * Implements 'LDMXCSR'.
9455 *
9456 * @param iEffSeg The effective segment register for @a GCPtrEff.
9457 * @param GCPtrEff The address of the image.
9458 */
9459IEM_CIMPL_DEF_2(iemCImpl_ldmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
9460{
9461 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
9462
9463 /*
9464 * Raise exceptions.
9465 */
9466 /** @todo testcase - order of LDMXCSR faults. Does \#PF, \#GP and \#SS
9467 * happen after or before \#UD and \#EM? */
9468 if ( !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)
9469 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR))
9470 {
9471 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
9472 {
9473 /*
9474 * Do the job.
9475 */
9476 uint32_t fNewMxCsr;
9477 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &fNewMxCsr, iEffSeg, GCPtrEff);
9478 if (rcStrict == VINF_SUCCESS)
9479 {
9480 uint32_t const fMxCsrMask = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
9481 if (!(fNewMxCsr & ~fMxCsrMask))
9482 {
9483 pVCpu->cpum.GstCtx.XState.x87.MXCSR = fNewMxCsr;
9484 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9485 }
9486 Log(("ldmxcsr: New MXCSR=%#RX32 & ~MASK=%#RX32 = %#RX32 -> #GP(0)\n",
9487 fNewMxCsr, fMxCsrMask, fNewMxCsr & ~fMxCsrMask));
9488 return iemRaiseGeneralProtectionFault0(pVCpu);
9489 }
9490 return rcStrict;
9491 }
9492 return iemRaiseDeviceNotAvailable(pVCpu);
9493 }
9494 return iemRaiseUndefinedOpcode(pVCpu);
9495}
9496
9497
9498/**
9499 * Commmon routine for fnstenv and fnsave.
9500 *
9501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9502 * @param enmEffOpSize The effective operand size.
9503 * @param uPtr Where to store the state.
9504 */
9505static void iemCImplCommonFpuStoreEnv(PVMCPUCC pVCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr)
9506{
9507 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9508 PCX86FXSTATE pSrcX87 = &pVCpu->cpum.GstCtx.XState.x87;
9509 if (enmEffOpSize == IEMMODE_16BIT)
9510 {
9511 uPtr.pu16[0] = pSrcX87->FCW;
9512 uPtr.pu16[1] = pSrcX87->FSW;
9513 uPtr.pu16[2] = iemFpuCalcFullFtw(pSrcX87);
9514 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9515 {
9516 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
9517 * protected mode or long mode and we save it in real mode? And vice
9518 * versa? And with 32-bit operand size? I think CPU is storing the
9519 * effective address ((CS << 4) + IP) in the offset register and not
9520 * doing any address calculations here. */
9521 uPtr.pu16[3] = (uint16_t)pSrcX87->FPUIP;
9522 uPtr.pu16[4] = ((pSrcX87->FPUIP >> 4) & UINT16_C(0xf000)) | pSrcX87->FOP;
9523 uPtr.pu16[5] = (uint16_t)pSrcX87->FPUDP;
9524 uPtr.pu16[6] = (pSrcX87->FPUDP >> 4) & UINT16_C(0xf000);
9525 }
9526 else
9527 {
9528 uPtr.pu16[3] = pSrcX87->FPUIP;
9529 uPtr.pu16[4] = pSrcX87->CS;
9530 uPtr.pu16[5] = pSrcX87->FPUDP;
9531 uPtr.pu16[6] = pSrcX87->DS;
9532 }
9533 }
9534 else
9535 {
9536 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
9537 uPtr.pu16[0*2] = pSrcX87->FCW;
9538 uPtr.pu16[0*2+1] = 0xffff; /* (0xffff observed on intel skylake.) */
9539 uPtr.pu16[1*2] = pSrcX87->FSW;
9540 uPtr.pu16[1*2+1] = 0xffff;
9541 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pSrcX87);
9542 uPtr.pu16[2*2+1] = 0xffff;
9543 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9544 {
9545 uPtr.pu16[3*2] = (uint16_t)pSrcX87->FPUIP;
9546 uPtr.pu32[4] = ((pSrcX87->FPUIP & UINT32_C(0xffff0000)) >> 4) | pSrcX87->FOP;
9547 uPtr.pu16[5*2] = (uint16_t)pSrcX87->FPUDP;
9548 uPtr.pu32[6] = (pSrcX87->FPUDP & UINT32_C(0xffff0000)) >> 4;
9549 }
9550 else
9551 {
9552 uPtr.pu32[3] = pSrcX87->FPUIP;
9553 uPtr.pu16[4*2] = pSrcX87->CS;
9554 uPtr.pu16[4*2+1] = pSrcX87->FOP;
9555 uPtr.pu32[5] = pSrcX87->FPUDP;
9556 uPtr.pu16[6*2] = pSrcX87->DS;
9557 uPtr.pu16[6*2+1] = 0xffff;
9558 }
9559 }
9560}
9561
9562
9563/**
9564 * Commmon routine for fldenv and frstor
9565 *
9566 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9567 * @param enmEffOpSize The effective operand size.
9568 * @param uPtr Where to store the state.
9569 */
9570static void iemCImplCommonFpuRestoreEnv(PVMCPUCC pVCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr)
9571{
9572 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9573 PX86FXSTATE pDstX87 = &pVCpu->cpum.GstCtx.XState.x87;
9574 if (enmEffOpSize == IEMMODE_16BIT)
9575 {
9576 pDstX87->FCW = uPtr.pu16[0];
9577 pDstX87->FSW = uPtr.pu16[1];
9578 pDstX87->FTW = uPtr.pu16[2];
9579 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9580 {
9581 pDstX87->FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
9582 pDstX87->FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
9583 pDstX87->FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
9584 pDstX87->CS = 0;
9585 pDstX87->Rsrvd1= 0;
9586 pDstX87->DS = 0;
9587 pDstX87->Rsrvd2= 0;
9588 }
9589 else
9590 {
9591 pDstX87->FPUIP = uPtr.pu16[3];
9592 pDstX87->CS = uPtr.pu16[4];
9593 pDstX87->Rsrvd1= 0;
9594 pDstX87->FPUDP = uPtr.pu16[5];
9595 pDstX87->DS = uPtr.pu16[6];
9596 pDstX87->Rsrvd2= 0;
9597 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
9598 }
9599 }
9600 else
9601 {
9602 pDstX87->FCW = uPtr.pu16[0*2];
9603 pDstX87->FSW = uPtr.pu16[1*2];
9604 pDstX87->FTW = uPtr.pu16[2*2];
9605 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9606 {
9607 pDstX87->FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
9608 pDstX87->FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
9609 pDstX87->FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
9610 pDstX87->CS = 0;
9611 pDstX87->Rsrvd1= 0;
9612 pDstX87->DS = 0;
9613 pDstX87->Rsrvd2= 0;
9614 }
9615 else
9616 {
9617 pDstX87->FPUIP = uPtr.pu32[3];
9618 pDstX87->CS = uPtr.pu16[4*2];
9619 pDstX87->Rsrvd1= 0;
9620 pDstX87->FOP = uPtr.pu16[4*2+1];
9621 pDstX87->FPUDP = uPtr.pu32[5];
9622 pDstX87->DS = uPtr.pu16[6*2];
9623 pDstX87->Rsrvd2= 0;
9624 }
9625 }
9626
9627 /* Make adjustments. */
9628 pDstX87->FTW = iemFpuCompressFtw(pDstX87->FTW);
9629#ifdef LOG_ENABLED
9630 uint16_t const fOldFsw = pDstX87->FSW;
9631#endif
9632 pDstX87->FCW &= ~X86_FCW_ZERO_MASK | X86_FCW_IC_MASK; /* Intel 10980xe allows setting the IC bit. Win 3.11 CALC.EXE sets it. */
9633 iemFpuRecalcExceptionStatus(pDstX87);
9634#ifdef LOG_ENABLED
9635 if ((pDstX87->FSW & X86_FSW_ES) ^ (fOldFsw & X86_FSW_ES))
9636 Log11(("iemCImplCommonFpuRestoreEnv: %04x:%08RX64: %s FPU exception (FCW=%#x FSW=%#x -> %#x)\n",
9637 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fOldFsw & X86_FSW_ES ? "Supressed" : "Raised",
9638 pDstX87->FCW, fOldFsw, pDstX87->FSW));
9639#endif
9640
9641 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
9642 * exceptions are pending after loading the saved state? */
9643}
9644
9645
9646/**
9647 * Implements 'FNSTENV'.
9648 *
9649 * @param enmEffOpSize The operand size (only REX.W really matters).
9650 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
9651 * @param GCPtrEffDst The address of the image.
9652 */
9653IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
9654{
9655 RTPTRUNION uPtr;
9656 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
9657 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
9658 enmEffOpSize == IEMMODE_16BIT ? 1 : 3 /** @todo ? */);
9659 if (rcStrict != VINF_SUCCESS)
9660 return rcStrict;
9661
9662 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr);
9663
9664 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
9665 if (rcStrict != VINF_SUCCESS)
9666 return rcStrict;
9667
9668 /* Mask all math exceptions. Any possibly pending exceptions will be cleared. */
9669 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9670 pFpuCtx->FCW |= X86_FCW_XCPT_MASK;
9671#ifdef LOG_ENABLED
9672 uint16_t fOldFsw = pFpuCtx->FSW;
9673#endif
9674 iemFpuRecalcExceptionStatus(pFpuCtx);
9675#ifdef LOG_ENABLED
9676 if ((pFpuCtx->FSW & X86_FSW_ES) ^ (fOldFsw & X86_FSW_ES))
9677 Log11(("fnstenv: %04x:%08RX64: %s FPU exception (FCW=%#x, FSW %#x -> %#x)\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9678 fOldFsw & X86_FSW_ES ? "Supressed" : "Raised", pFpuCtx->FCW, fOldFsw, pFpuCtx->FSW));
9679#endif
9680
9681 iemHlpUsedFpu(pVCpu);
9682
9683 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
9684 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9685}
9686
9687
9688/**
9689 * Implements 'FNSAVE'.
9690 *
9691 * @param enmEffOpSize The operand size.
9692 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
9693 * @param GCPtrEffDst The address of the image.
9694 */
9695IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
9696{
9697 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9698
9699 RTPTRUNION uPtr;
9700 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
9701 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 3 /** @todo ? */);
9702 if (rcStrict != VINF_SUCCESS)
9703 return rcStrict;
9704
9705 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9706 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr);
9707 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
9708 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
9709 {
9710 paRegs[i].au32[0] = pFpuCtx->aRegs[i].au32[0];
9711 paRegs[i].au32[1] = pFpuCtx->aRegs[i].au32[1];
9712 paRegs[i].au16[4] = pFpuCtx->aRegs[i].au16[4];
9713 }
9714
9715 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
9716 if (rcStrict != VINF_SUCCESS)
9717 return rcStrict;
9718
9719 /* Rotate the stack to account for changed TOS. */
9720 iemFpuRotateStackSetTop(pFpuCtx, 0);
9721
9722 /*
9723 * Re-initialize the FPU context.
9724 */
9725 pFpuCtx->FCW = 0x37f;
9726 pFpuCtx->FSW = 0;
9727 pFpuCtx->FTW = 0x00; /* 0 - empty */
9728 pFpuCtx->FPUDP = 0;
9729 pFpuCtx->DS = 0;
9730 pFpuCtx->Rsrvd2= 0;
9731 pFpuCtx->FPUIP = 0;
9732 pFpuCtx->CS = 0;
9733 pFpuCtx->Rsrvd1= 0;
9734 pFpuCtx->FOP = 0;
9735
9736 iemHlpUsedFpu(pVCpu);
9737 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9738}
9739
9740
9741
9742/**
9743 * Implements 'FLDENV'.
9744 *
9745 * @param enmEffOpSize The operand size (only REX.W really matters).
9746 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
9747 * @param GCPtrEffSrc The address of the image.
9748 */
9749IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
9750{
9751 RTCPTRUNION uPtr;
9752 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
9753 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R,
9754 enmEffOpSize == IEMMODE_16BIT ? 1 : 3 /** @todo ?*/);
9755 if (rcStrict != VINF_SUCCESS)
9756 return rcStrict;
9757
9758 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr);
9759
9760 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
9761 if (rcStrict != VINF_SUCCESS)
9762 return rcStrict;
9763
9764 iemHlpUsedFpu(pVCpu);
9765 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9766}
9767
9768
9769/**
9770 * Implements 'FRSTOR'.
9771 *
9772 * @param enmEffOpSize The operand size.
9773 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
9774 * @param GCPtrEffSrc The address of the image.
9775 */
9776IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
9777{
9778 RTCPTRUNION uPtr;
9779 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
9780 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R, 3 /** @todo ?*/ );
9781 if (rcStrict != VINF_SUCCESS)
9782 return rcStrict;
9783
9784 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9785 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr);
9786 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
9787 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
9788 {
9789 pFpuCtx->aRegs[i].au32[0] = paRegs[i].au32[0];
9790 pFpuCtx->aRegs[i].au32[1] = paRegs[i].au32[1];
9791 pFpuCtx->aRegs[i].au32[2] = paRegs[i].au16[4];
9792 pFpuCtx->aRegs[i].au32[3] = 0;
9793 }
9794
9795 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
9796 if (rcStrict != VINF_SUCCESS)
9797 return rcStrict;
9798
9799 iemHlpUsedFpu(pVCpu);
9800 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9801}
9802
9803
9804/**
9805 * Implements 'FLDCW'.
9806 *
9807 * @param u16Fcw The new FCW.
9808 */
9809IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
9810{
9811 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9812
9813 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
9814 /** @todo Testcase: Try see what happens when trying to set undefined bits
9815 * (other than 6 and 7). Currently ignoring them. */
9816 /** @todo Testcase: Test that it raises and loweres the FPU exception bits
9817 * according to FSW. (This is what is currently implemented.) */
9818 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9819 pFpuCtx->FCW = u16Fcw & (~X86_FCW_ZERO_MASK | X86_FCW_IC_MASK); /* Intel 10980xe allows setting the IC bit. Win 3.11 CALC.EXE sets it. */
9820#ifdef LOG_ENABLED
9821 uint16_t fOldFsw = pFpuCtx->FSW;
9822#endif
9823 iemFpuRecalcExceptionStatus(pFpuCtx);
9824#ifdef LOG_ENABLED
9825 if ((pFpuCtx->FSW & X86_FSW_ES) ^ (fOldFsw & X86_FSW_ES))
9826 Log11(("fldcw: %04x:%08RX64: %s FPU exception (FCW=%#x, FSW %#x -> %#x)\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9827 fOldFsw & X86_FSW_ES ? "Supressed" : "Raised", pFpuCtx->FCW, fOldFsw, pFpuCtx->FSW));
9828#endif
9829
9830 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
9831 iemHlpUsedFpu(pVCpu);
9832 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9833}
9834
9835
9836
9837/**
9838 * Implements the underflow case of fxch.
9839 *
9840 * @param iStReg The other stack register.
9841 * @param uFpuOpcode The FPU opcode (for simplicity).
9842 */
9843IEM_CIMPL_DEF_2(iemCImpl_fxch_underflow, uint8_t, iStReg, uint16_t, uFpuOpcode)
9844{
9845 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9846
9847 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9848 unsigned const iReg1 = X86_FSW_TOP_GET(pFpuCtx->FSW);
9849 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
9850 Assert(!(RT_BIT(iReg1) & pFpuCtx->FTW) || !(RT_BIT(iReg2) & pFpuCtx->FTW));
9851
9852 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
9853 * registers are read as QNaN and then exchanged. This could be
9854 * wrong... */
9855 if (pFpuCtx->FCW & X86_FCW_IM)
9856 {
9857 if (RT_BIT(iReg1) & pFpuCtx->FTW)
9858 {
9859 if (RT_BIT(iReg2) & pFpuCtx->FTW)
9860 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
9861 else
9862 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[iStReg].r80;
9863 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
9864 }
9865 else
9866 {
9867 pFpuCtx->aRegs[iStReg].r80 = pFpuCtx->aRegs[0].r80;
9868 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
9869 }
9870 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
9871 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
9872 }
9873 else
9874 {
9875 /* raise underflow exception, don't change anything. */
9876 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
9877 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
9878 Log11(("fxch: %04x:%08RX64: Underflow exception (FSW=%#x)\n",
9879 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
9880 }
9881
9882 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
9883 iemHlpUsedFpu(pVCpu);
9884 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9885}
9886
9887
9888/**
9889 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
9890 *
9891 * @param iStReg The other stack register.
9892 * @param pfnAImpl The assembly comparison implementation.
9893 * @param uPopAndFpuOpcode Bits 15-0: The FPU opcode.
9894 * Bit 31: Whether we should pop the stack when
9895 * done or not.
9896 */
9897IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, uint32_t, uPopAndFpuOpcode)
9898{
9899 Assert(iStReg < 8);
9900 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9901
9902 /*
9903 * Raise exceptions.
9904 */
9905 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS))
9906 return iemRaiseDeviceNotAvailable(pVCpu);
9907
9908 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9909 uint16_t u16Fsw = pFpuCtx->FSW;
9910 if (u16Fsw & X86_FSW_ES)
9911 return iemRaiseMathFault(pVCpu);
9912
9913 /*
9914 * Check if any of the register accesses causes #SF + #IA.
9915 */
9916 bool fPop = RT_BOOL(uPopAndFpuOpcode & RT_BIT_32(31));
9917 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
9918 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
9919 if ((pFpuCtx->FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
9920 {
9921 uint32_t u32Eflags = pfnAImpl(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
9922
9923 pFpuCtx->FSW &= ~X86_FSW_C1;
9924 pFpuCtx->FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
9925 if ( !(u16Fsw & X86_FSW_IE)
9926 || (pFpuCtx->FCW & X86_FCW_IM) )
9927 {
9928 pVCpu->cpum.GstCtx.eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
9929 pVCpu->cpum.GstCtx.eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
9930 }
9931 }
9932 else if (pFpuCtx->FCW & X86_FCW_IM)
9933 {
9934 /* Masked underflow. */
9935 pFpuCtx->FSW &= ~X86_FSW_C1;
9936 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
9937 pVCpu->cpum.GstCtx.eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
9938 pVCpu->cpum.GstCtx.eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
9939 }
9940 else
9941 {
9942 /* Raise underflow - don't touch EFLAGS or TOP. */
9943 pFpuCtx->FSW &= ~X86_FSW_C1;
9944 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
9945 Log11(("fxch: %04x:%08RX64: Raising IE+SF exception (FSW=%#x)\n",
9946 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
9947 fPop = false;
9948 }
9949
9950 /*
9951 * Pop if necessary.
9952 */
9953 if (fPop)
9954 {
9955 pFpuCtx->FTW &= ~RT_BIT(iReg1);
9956 iemFpuStackIncTop(pVCpu);
9957 }
9958
9959 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, (uint16_t)uPopAndFpuOpcode);
9960 iemHlpUsedFpu(pVCpu);
9961 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9962}
9963
9964/** @} */
9965
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette