VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@ 60185

Last change on this file since 60185 was 60185, checked in by vboxsync, 9 years ago

IEM: Fixed a couple of edge cases and broken verification mode.

  • Update enmCpuMode after loading hidden CS flags (prep for recompiling).
  • Fixed retf in 64-bit mode where we would load CS.BASE with zero when returning to 16-bit or 32-bit code.
  • Fixed ESP/SP handling for protected mode exception injection.
  • Implemented the two string I/O notification functions that would assert in verification mode.
  • The IEMExec* methods must call iemUninitExec to undo poisoning of decoding data members as it will otherwise interfere with verification mode opcode fetching optimizations and other stuff.

The above makes the current bs3-cpu-basic-2 code work in --execute-all-in-iem mode.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 233.0 KB
 
1/* $Id: IEMAllCImpl.cpp.h 60185 2016-03-24 17:39:40Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.alldomusa.eu.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @name Misc Helpers
20 * @{
21 */
22
23
24/**
25 * Worker function for iemHlpCheckPortIOPermission, don't call directly.
26 *
27 * @returns Strict VBox status code.
28 *
29 * @param pIemCpu The IEM per CPU data.
30 * @param pCtx The register context.
31 * @param u16Port The port number.
32 * @param cbOperand The operand size.
33 */
34static VBOXSTRICTRC iemHlpCheckPortIOPermissionBitmap(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
35{
36 /* The TSS bits we're interested in are the same on 386 and AMD64. */
37 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_BUSY == X86_SEL_TYPE_SYS_386_TSS_BUSY);
38 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_AVAIL == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
39 AssertCompileMembersAtSameOffset(X86TSS32, offIoBitmap, X86TSS64, offIoBitmap);
40 AssertCompile(sizeof(X86TSS32) == sizeof(X86TSS64));
41
42 /*
43 * Check the TSS type; 16-bit TSSes don't have an I/O permission bitmap.
44 */
45 Assert(!pCtx->tr.Attr.n.u1DescType);
46 if (RT_UNLIKELY( pCtx->tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_BUSY
47 && pCtx->tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_AVAIL))
48 {
49 Log(("iemHlpCheckPortIOPermissionBitmap: Port=%#x cb=%d - TSS type %#x (attr=%#x) has no I/O bitmap -> #GP(0)\n",
50 u16Port, cbOperand, pCtx->tr.Attr.n.u4Type, pCtx->tr.Attr.u));
51 return iemRaiseGeneralProtectionFault0(pIemCpu);
52 }
53
54 /*
55 * Read the bitmap offset (may #PF).
56 */
57 uint16_t offBitmap;
58 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pIemCpu, &offBitmap, UINT8_MAX,
59 pCtx->tr.u64Base + RT_OFFSETOF(X86TSS64, offIoBitmap));
60 if (rcStrict != VINF_SUCCESS)
61 {
62 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading offIoBitmap (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
63 return rcStrict;
64 }
65
66 /*
67 * We need the bit range from u16Port to (u16Port + cbOperand - 1). However, Intel
68 * describes the CPU as always reading two bytes regardless of whether the bit
69 * range crosses a byte boundary, hence the + 1 in the test below.
70 */
71 uint32_t offFirstBit = (uint32_t)u16Port / 8 + offBitmap;
72 /** @todo check if real CPUs ensure that offBitmap has a minimum value of,
73 * for instance, sizeof(X86TSS32). */
74 if (offFirstBit + 1 > pCtx->tr.u32Limit) /* the limit is inclusive */
75 {
76 Log(("iemHlpCheckPortIOPermissionBitmap: offFirstBit=%#x + 1 is beyond u32Limit=%#x -> #GP(0)\n",
77 offFirstBit, pCtx->tr.u32Limit));
78 return iemRaiseGeneralProtectionFault0(pIemCpu);
79 }
80
81 /*
82 * Read the necessary bits.
83 */
84 /** @todo Test the assertion in the Intel manual that the CPU reads two
85 * bytes. The question is how this works wrt #PF and #GP on the
86 * 2nd byte when it's not required. */
87 uint16_t bmBytes = UINT16_MAX;
88 rcStrict = iemMemFetchSysU16(pIemCpu, &bmBytes, UINT8_MAX, pCtx->tr.u64Base + offFirstBit);
89 if (rcStrict != VINF_SUCCESS)
90 {
91 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading I/O bitmap @%#x (%Rrc)\n", offFirstBit, VBOXSTRICTRC_VAL(rcStrict)));
92 return rcStrict;
93 }
94
95 /*
96 * Perform the check.
97 */
98 uint16_t fPortMask = (1 << cbOperand) - 1;
99 bmBytes >>= (u16Port & 7);
100 if (bmBytes & fPortMask)
101 {
102 Log(("iemHlpCheckPortIOPermissionBitmap: u16Port=%#x LB %u - access denied (bm=%#x mask=%#x) -> #GP(0)\n",
103 u16Port, cbOperand, bmBytes, fPortMask));
104 return iemRaiseGeneralProtectionFault0(pIemCpu);
105 }
106
107 return VINF_SUCCESS;
108}
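
/* Editor's sketch, not part of the original file: the bitmap arithmetic above on plain
   integers. For a hypothetical IN/OUT on port 0x3F9 with cbOperand=2 and offBitmap=0x88,
   offFirstBit = 0x3F9/8 + 0x88 = 0x107, and the access is denied if bit 1 or bit 2
   (0x3F9 & 7 = 1, mask = 3) is set in the two bytes read at that offset. The helper
   name is made up for illustration. */
#if 0 /* illustration only */
static bool iemHlpSketchIoBitmapDenies(uint16_t u16Port, uint8_t cbOperand, uint16_t bmBytes)
{
    uint16_t fPortMask = (uint16_t)((1 << cbOperand) - 1);     /* 1, 3 or 15 for 1/2/4 byte accesses */
    return ((bmBytes >> (u16Port & 7)) & fPortMask) != 0;      /* any set bit -> #GP(0) above */
}
#endif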
109
110
111/**
112 * Checks if we are allowed to access the given I/O port, raising the
113 * appropriate exceptions if we aren't (or if the I/O bitmap is not
114 * accessible).
115 *
116 * @returns Strict VBox status code.
117 *
118 * @param pIemCpu The IEM per CPU data.
119 * @param pCtx The register context.
120 * @param u16Port The port number.
121 * @param cbOperand The operand size.
122 */
123DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
124{
125 X86EFLAGS Efl;
126 Efl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
127 if ( (pCtx->cr0 & X86_CR0_PE)
128 && ( pIemCpu->uCpl > Efl.Bits.u2IOPL
129 || Efl.Bits.u1VM) )
130 return iemHlpCheckPortIOPermissionBitmap(pIemCpu, pCtx, u16Port, cbOperand);
131 return VINF_SUCCESS;
132}
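
/* Editor's sketch, not part of the original file: the wrapper above only consults the TSS
   I/O bitmap when the check can actually fail, i.e. in protected mode with CPL above IOPL
   or in virtual-8086 mode; otherwise the port access is allowed outright. */
#if 0 /* illustration only */
static bool iemHlpSketchNeedsIoBitmapCheck(uint32_t cr0, uint8_t uCpl, uint32_t fEfl)
{
    return (cr0 & X86_CR0_PE)
        && (   uCpl > X86_EFL_GET_IOPL(fEfl)
            || (fEfl & X86_EFL_VM));
}
#endif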
133
134
135#if 0
136/**
137 * Calculates the parity bit.
138 *
139 * @returns true if the bit is set, false if not.
140 * @param u8Result The least significant byte of the result.
141 */
142static bool iemHlpCalcParityFlag(uint8_t u8Result)
143{
144 /*
145 * Parity is set if the number of bits in the least significant byte of
146 * the result is even.
147 */
148 uint8_t cBits;
149 cBits = u8Result & 1; /* 0 */
150 u8Result >>= 1;
151 cBits += u8Result & 1;
152 u8Result >>= 1;
153 cBits += u8Result & 1;
154 u8Result >>= 1;
155 cBits += u8Result & 1;
156 u8Result >>= 1;
157 cBits += u8Result & 1; /* 4 */
158 u8Result >>= 1;
159 cBits += u8Result & 1;
160 u8Result >>= 1;
161 cBits += u8Result & 1;
162 u8Result >>= 1;
163 cBits += u8Result & 1;
164 return !(cBits & 1);
165}
166#endif /* not used */
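
/* Editor's sketch, also unused: the same parity computation as above but folding the byte
   with XOR instead of summing the bits one at a time. Purely illustrative. */
#if 0 /* illustration only */
static bool iemHlpSketchCalcParityFlagXorFold(uint8_t u8Result)
{
    u8Result ^= u8Result >> 4;  /* fold the high nibble into the low nibble */
    u8Result ^= u8Result >> 2;
    u8Result ^= u8Result >> 1;  /* bit 0 now holds the XOR of all eight bits */
    return !(u8Result & 1);     /* PF is set when the number of set bits is even */
}
#endif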
167
168
169/**
170 * Updates the specified flags according to an 8-bit result.
171 *
172 * @param pIemCpu The IEM state of the calling EMT.
173 * @param u8Result The result to set the flags according to.
174 * @param fToUpdate The flags to update.
175 * @param fUndefined The flags that are specified as undefined.
176 */
177static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
178{
179 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
180
181 uint32_t fEFlags = pCtx->eflags.u;
182 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
183 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
184 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
185#ifdef IEM_VERIFICATION_MODE_FULL
186 pIemCpu->fUndefinedEFlags |= fUndefined;
187#endif
188}
189
190
191/**
192 * Helper used by iret.
193 *
194 * @param uCpl The new CPL.
195 * @param pSReg Pointer to the segment register.
196 */
197static void iemHlpAdjustSelectorForNewCpl(PIEMCPU pIemCpu, uint8_t uCpl, PCPUMSELREG pSReg)
198{
199#ifdef VBOX_WITH_RAW_MODE_NOT_R0
200 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
201 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
202#else
203 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
204#endif
205
206 if ( uCpl > pSReg->Attr.n.u2Dpl
207 && pSReg->Attr.n.u1DescType /* code or data, not system */
208 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
209 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
210 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, 0);
211}
212
213
214/**
215 * Indicates that we have modified the FPU state.
216 *
217 * @param pIemCpu The IEM state of the calling EMT.
218 */
219DECLINLINE(void) iemHlpUsedFpu(PIEMCPU pIemCpu)
220{
221 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM);
222}
223
224/** @} */
225
226/** @name C Implementations
227 * @{
228 */
229
230/**
231 * Implements a 16-bit popa.
232 */
233IEM_CIMPL_DEF_0(iemCImpl_popa_16)
234{
235 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
236 RTGCPTR GCPtrStart = iemRegGetEffRsp(pIemCpu, pCtx);
237 RTGCPTR GCPtrLast = GCPtrStart + 15;
238 VBOXSTRICTRC rcStrict;
239
240 /*
241 * The docs are a bit hard to comprehend here, but it looks like we wrap
242 * around in real mode as long as none of the individual pops crosses the
243 * end of the stack segment. In protected mode we check the whole access
244 * in one go. For efficiency, only do the word-by-word thing if we're in
245 * danger of wrapping around.
246 */
247 /** @todo do popa boundary / wrap-around checks. */
248 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
249 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
250 {
251 /* word-by-word */
252 RTUINT64U TmpRsp;
253 TmpRsp.u = pCtx->rsp;
254 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
255 if (rcStrict == VINF_SUCCESS)
256 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
257 if (rcStrict == VINF_SUCCESS)
258 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
259 if (rcStrict == VINF_SUCCESS)
260 {
261 iemRegAddToRspEx(pIemCpu, pCtx, &TmpRsp, 2); /* sp */
262 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
263 }
264 if (rcStrict == VINF_SUCCESS)
265 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
266 if (rcStrict == VINF_SUCCESS)
267 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
268 if (rcStrict == VINF_SUCCESS)
269 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
270 if (rcStrict == VINF_SUCCESS)
271 {
272 pCtx->rsp = TmpRsp.u;
273 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
274 }
275 }
276 else
277 {
278 uint16_t const *pa16Mem = NULL;
279 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
280 if (rcStrict == VINF_SUCCESS)
281 {
282 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
283 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
284 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
285 /* skip sp */
286 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
287 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
288 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
289 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
290 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
291 if (rcStrict == VINF_SUCCESS)
292 {
293 iemRegAddToRsp(pIemCpu, pCtx, 16);
294 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
295 }
296 }
297 }
298 return rcStrict;
299}
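
/* Editor's sketch, not part of the original file: why the fast path above indexes the mapped
   stack with (7 - X86_GREG_xXX). PUSHA stores AX,CX,DX,BX,SP,BP,SI,DI in that order, so DI
   ends up at the lowest address (slot 0) and AX at the highest (slot 7); with X86_GREG_xAX=0
   through X86_GREG_xDI=7 the slot is simply 7 minus the register index. The helper name is
   made up. */
#if 0 /* illustration only */
static unsigned iemHlpSketchPushaSlot(unsigned iGReg)
{
    return 7 - iGReg;   /* X86_GREG_xDI (7) -> slot 0, X86_GREG_xAX (0) -> slot 7 */
}
#endif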
300
301
302/**
303 * Implements a 32-bit popa.
304 */
305IEM_CIMPL_DEF_0(iemCImpl_popa_32)
306{
307 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
308 RTGCPTR GCPtrStart = iemRegGetEffRsp(pIemCpu, pCtx);
309 RTGCPTR GCPtrLast = GCPtrStart + 31;
310 VBOXSTRICTRC rcStrict;
311
312 /*
313 * The docs are a bit hard to comprehend here, but it looks like we wrap
314 * around in real mode as long as none of the individual pops crosses the
315 * end of the stack segment. In protected mode we check the whole access
316 * in one go. For efficiency, only do the word-by-word thing if we're in
317 * danger of wrapping around.
318 */
319 /** @todo do popa boundary / wrap-around checks. */
320 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
321 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
322 {
323 /* word-by-word */
324 RTUINT64U TmpRsp;
325 TmpRsp.u = pCtx->rsp;
326 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
327 if (rcStrict == VINF_SUCCESS)
328 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
329 if (rcStrict == VINF_SUCCESS)
330 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
331 if (rcStrict == VINF_SUCCESS)
332 {
333 iemRegAddToRspEx(pIemCpu, pCtx, &TmpRsp, 2); /* sp */
334 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
335 }
336 if (rcStrict == VINF_SUCCESS)
337 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
338 if (rcStrict == VINF_SUCCESS)
339 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
340 if (rcStrict == VINF_SUCCESS)
341 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
342 if (rcStrict == VINF_SUCCESS)
343 {
344#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
345 pCtx->rdi &= UINT32_MAX;
346 pCtx->rsi &= UINT32_MAX;
347 pCtx->rbp &= UINT32_MAX;
348 pCtx->rbx &= UINT32_MAX;
349 pCtx->rdx &= UINT32_MAX;
350 pCtx->rcx &= UINT32_MAX;
351 pCtx->rax &= UINT32_MAX;
352#endif
353 pCtx->rsp = TmpRsp.u;
354 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
355 }
356 }
357 else
358 {
359 uint32_t const *pa32Mem;
360 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
361 if (rcStrict == VINF_SUCCESS)
362 {
363 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
364 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
365 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
366 /* skip esp */
367 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
368 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
369 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
370 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
371 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
372 if (rcStrict == VINF_SUCCESS)
373 {
374 iemRegAddToRsp(pIemCpu, pCtx, 32);
375 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
376 }
377 }
378 }
379 return rcStrict;
380}
381
382
383/**
384 * Implements a 16-bit pusha.
385 */
386IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
387{
388 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
389 RTGCPTR GCPtrTop = iemRegGetEffRsp(pIemCpu, pCtx);
390 RTGCPTR GCPtrBottom = GCPtrTop - 15;
391 VBOXSTRICTRC rcStrict;
392
393 /*
394 * The docs are a bit hard to comprehend here, but it looks like we wrap
395 * around in real mode as long as none of the individual pushes crosses the
396 * end of the stack segment. In protected mode we check the whole access
397 * in one go. For efficiency, only do the word-by-word thing if we're in
398 * danger of wrapping around.
399 */
400 /** @todo do pusha boundary / wrap-around checks. */
401 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
402 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
403 {
404 /* word-by-word */
405 RTUINT64U TmpRsp;
406 TmpRsp.u = pCtx->rsp;
407 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
408 if (rcStrict == VINF_SUCCESS)
409 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
410 if (rcStrict == VINF_SUCCESS)
411 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
412 if (rcStrict == VINF_SUCCESS)
413 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
414 if (rcStrict == VINF_SUCCESS)
415 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
416 if (rcStrict == VINF_SUCCESS)
417 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
418 if (rcStrict == VINF_SUCCESS)
419 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
420 if (rcStrict == VINF_SUCCESS)
421 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
422 if (rcStrict == VINF_SUCCESS)
423 {
424 pCtx->rsp = TmpRsp.u;
425 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
426 }
427 }
428 else
429 {
430 GCPtrBottom--;
431 uint16_t *pa16Mem = NULL;
432 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
433 if (rcStrict == VINF_SUCCESS)
434 {
435 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
436 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
437 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
438 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
439 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
440 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
441 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
442 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
443 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
444 if (rcStrict == VINF_SUCCESS)
445 {
446 iemRegSubFromRsp(pIemCpu, pCtx, 16);
447 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
448 }
449 }
450 }
451 return rcStrict;
452}
453
454
455/**
456 * Implements a 32-bit pusha.
457 */
458IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
459{
460 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
461 RTGCPTR GCPtrTop = iemRegGetEffRsp(pIemCpu, pCtx);
462 RTGCPTR GCPtrBottom = GCPtrTop - 31;
463 VBOXSTRICTRC rcStrict;
464
465 /*
466 * The docs are a bit hard to comprehend here, but it looks like we wrap
467 * around in real mode as long as none of the individual pushes crosses the
468 * end of the stack segment. In protected mode we check the whole access
469 * in one go. For efficiency, only do the word-by-word thing if we're in
470 * danger of wrapping around.
471 */
472 /** @todo do pusha boundary / wrap-around checks. */
473 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
474 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
475 {
476 /* word-by-word */
477 RTUINT64U TmpRsp;
478 TmpRsp.u = pCtx->rsp;
479 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
480 if (rcStrict == VINF_SUCCESS)
481 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
482 if (rcStrict == VINF_SUCCESS)
483 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
484 if (rcStrict == VINF_SUCCESS)
485 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
486 if (rcStrict == VINF_SUCCESS)
487 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
488 if (rcStrict == VINF_SUCCESS)
489 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
490 if (rcStrict == VINF_SUCCESS)
491 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
492 if (rcStrict == VINF_SUCCESS)
493 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
494 if (rcStrict == VINF_SUCCESS)
495 {
496 pCtx->rsp = TmpRsp.u;
497 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
498 }
499 }
500 else
501 {
502 GCPtrBottom--;
503 uint32_t *pa32Mem;
504 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
505 if (rcStrict == VINF_SUCCESS)
506 {
507 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
508 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
509 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
510 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
511 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
512 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
513 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
514 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
515 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
516 if (rcStrict == VINF_SUCCESS)
517 {
518 iemRegSubFromRsp(pIemCpu, pCtx, 32);
519 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
520 }
521 }
522 }
523 return rcStrict;
524}
525
526
527/**
528 * Implements pushf.
529 *
530 *
531 * @param enmEffOpSize The effective operand size.
532 */
533IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
534{
535 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
536
537 /*
538 * If we're in V8086 mode some care is required (which is why we're
539 * doing this in a C implementation).
540 */
541 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
542 if ( (fEfl & X86_EFL_VM)
543 && X86_EFL_GET_IOPL(fEfl) != 3 )
544 {
545 Assert(pCtx->cr0 & X86_CR0_PE);
546 if ( enmEffOpSize != IEMMODE_16BIT
547 || !(pCtx->cr4 & X86_CR4_VME))
548 return iemRaiseGeneralProtectionFault0(pIemCpu);
549 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
550 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
551 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
552 }
553
554 /*
555 * Ok, clear RF and VM and push the flags.
556 */
557 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
558
559 VBOXSTRICTRC rcStrict;
560 switch (enmEffOpSize)
561 {
562 case IEMMODE_16BIT:
563 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
564 break;
565 case IEMMODE_32BIT:
566 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
567 break;
568 case IEMMODE_64BIT:
569 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
570 break;
571 IEM_NOT_REACHED_DEFAULT_CASE_RET();
572 }
573 if (rcStrict != VINF_SUCCESS)
574 return rcStrict;
575
576 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
577 return VINF_SUCCESS;
578}
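
/* Editor's sketch, not part of the original file: the VME path above materializes the virtual
   interrupt flag in the pushed image by clearing the real IF and copying VIF (bit 19) into the
   IF position (bit 9). A minimal stand-alone version of that transformation: */
#if 0 /* illustration only */
static uint16_t iemHlpSketchVmePushfImage(uint32_t fEfl)
{
    fEfl &= ~X86_EFL_IF;                            /* hide the real IF */
    fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);       /* VIF (bit 19) -> IF position (bit 9) */
    return (uint16_t)fEfl;                          /* only the low word gets pushed */
}
#endif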
579
580
581/**
582 * Implements popf.
583 *
584 * @param enmEffOpSize The effective operand size.
585 */
586IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
587{
588 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
589 uint32_t const fEflOld = IEMMISC_GET_EFL(pIemCpu, pCtx);
590 VBOXSTRICTRC rcStrict;
591 uint32_t fEflNew;
592
593 /*
594 * V8086 is special as usual.
595 */
596 if (fEflOld & X86_EFL_VM)
597 {
598 /*
599 * Almost anything goes if IOPL is 3.
600 */
601 if (X86_EFL_GET_IOPL(fEflOld) == 3)
602 {
603 switch (enmEffOpSize)
604 {
605 case IEMMODE_16BIT:
606 {
607 uint16_t u16Value;
608 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
609 if (rcStrict != VINF_SUCCESS)
610 return rcStrict;
611 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
612 break;
613 }
614 case IEMMODE_32BIT:
615 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
616 if (rcStrict != VINF_SUCCESS)
617 return rcStrict;
618 break;
619 IEM_NOT_REACHED_DEFAULT_CASE_RET();
620 }
621
622 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
623 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
624 }
625 /*
626 * Interrupt flag virtualization with CR4.VME=1.
627 */
628 else if ( enmEffOpSize == IEMMODE_16BIT
629 && (pCtx->cr4 & X86_CR4_VME) )
630 {
631 uint16_t u16Value;
632 RTUINT64U TmpRsp;
633 TmpRsp.u = pCtx->rsp;
634 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
635 if (rcStrict != VINF_SUCCESS)
636 return rcStrict;
637
638 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
639 * or before? */
640 if ( ( (u16Value & X86_EFL_IF)
641 && (fEflOld & X86_EFL_VIP))
642 || (u16Value & X86_EFL_TF) )
643 return iemRaiseGeneralProtectionFault0(pIemCpu);
644
645 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
646 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
647 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
648 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
649
650 pCtx->rsp = TmpRsp.u;
651 }
652 else
653 return iemRaiseGeneralProtectionFault0(pIemCpu);
654
655 }
656 /*
657 * Not in V8086 mode.
658 */
659 else
660 {
661 /* Pop the flags. */
662 switch (enmEffOpSize)
663 {
664 case IEMMODE_16BIT:
665 {
666 uint16_t u16Value;
667 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
668 if (rcStrict != VINF_SUCCESS)
669 return rcStrict;
670 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
671 break;
672 }
673 case IEMMODE_32BIT:
674 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
675 if (rcStrict != VINF_SUCCESS)
676 return rcStrict;
677 break;
678 case IEMMODE_64BIT:
679 {
680 uint64_t u64Value;
681 rcStrict = iemMemStackPopU64(pIemCpu, &u64Value);
682 if (rcStrict != VINF_SUCCESS)
683 return rcStrict;
684 fEflNew = u64Value; /** @todo testcase: Check exactly what happens if high bits are set. */
685 break;
686 }
687 IEM_NOT_REACHED_DEFAULT_CASE_RET();
688 }
689
690 /* Merge them with the current flags. */
691 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
692 || pIemCpu->uCpl == 0)
693 {
694 fEflNew &= X86_EFL_POPF_BITS;
695 fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
696 }
697 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
698 {
699 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
700 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
701 }
702 else
703 {
704 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
705 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
706 }
707 }
708
709 /*
710 * Commit the flags.
711 */
712 Assert(fEflNew & RT_BIT_32(1));
713 IEMMISC_SET_EFL(pIemCpu, pCtx, fEflNew);
714 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
715
716 return VINF_SUCCESS;
717}
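
/* Editor's sketch, not part of the original file: a simplified summary of the non-V8086 merge
   logic above - which EFLAGS bits POPF may actually change. (The real code also accepts the
   pop when IF/IOPL are not being modified at all; this sketch ignores that shortcut.) */
#if 0 /* illustration only */
static uint32_t iemHlpSketchPopfWritableMask(uint8_t uCpl, uint32_t fEflOld)
{
    if (uCpl == 0)
        return X86_EFL_POPF_BITS;                               /* ring-0 may change IOPL and IF too */
    if (uCpl <= X86_EFL_GET_IOPL(fEflOld))
        return X86_EFL_POPF_BITS & ~X86_EFL_IOPL;               /* IF may change, IOPL is preserved */
    return X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);    /* neither IOPL nor IF may change */
}
#endif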
718
719
720/**
721 * Implements a 16-bit indirect call.
722 *
723 * @param uNewPC The new program counter (RIP) value (loaded from the
724 * operand).
725 * @param enmEffOpSize The effective operand size.
726 */
727IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
728{
729 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
730 uint16_t uOldPC = pCtx->ip + cbInstr;
731 if (uNewPC > pCtx->cs.u32Limit)
732 return iemRaiseGeneralProtectionFault0(pIemCpu);
733
734 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
735 if (rcStrict != VINF_SUCCESS)
736 return rcStrict;
737
738 pCtx->rip = uNewPC;
739 pCtx->eflags.Bits.u1RF = 0;
740 return VINF_SUCCESS;
741}
742
743
744/**
745 * Implements a 16-bit relative call.
746 *
747 * @param offDisp The displacement offset.
748 */
749IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
750{
751 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
752 uint16_t uOldPC = pCtx->ip + cbInstr;
753 uint16_t uNewPC = uOldPC + offDisp;
754 if (uNewPC > pCtx->cs.u32Limit)
755 return iemRaiseGeneralProtectionFault0(pIemCpu);
756
757 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
758 if (rcStrict != VINF_SUCCESS)
759 return rcStrict;
760
761 pCtx->rip = uNewPC;
762 pCtx->eflags.Bits.u1RF = 0;
763 return VINF_SUCCESS;
764}
765
766
767/**
768 * Implements a 32-bit indirect call.
769 *
770 * @param uNewPC The new program counter (RIP) value (loaded from the
771 * operand).
772 * @param enmEffOpSize The effective operand size.
773 */
774IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
775{
776 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
777 uint32_t uOldPC = pCtx->eip + cbInstr;
778 if (uNewPC > pCtx->cs.u32Limit)
779 return iemRaiseGeneralProtectionFault0(pIemCpu);
780
781 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
782 if (rcStrict != VINF_SUCCESS)
783 return rcStrict;
784
785#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE) && defined(VBOX_WITH_CALL_RECORD)
786 /*
787 * CSAM hook for recording interesting indirect calls.
788 */
789 if ( !pCtx->eflags.Bits.u1IF
790 && (pCtx->cr0 & X86_CR0_PG)
791 && !CSAMIsEnabled(IEMCPU_TO_VM(pIemCpu))
792 && pIemCpu->uCpl == 0)
793 {
794 EMSTATE enmState = EMGetState(IEMCPU_TO_VMCPU(pIemCpu));
795 if ( enmState == EMSTATE_IEM_THEN_REM
796 || enmState == EMSTATE_IEM
797 || enmState == EMSTATE_REM)
798 CSAMR3RecordCallAddress(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
799 }
800#endif
801
802 pCtx->rip = uNewPC;
803 pCtx->eflags.Bits.u1RF = 0;
804 return VINF_SUCCESS;
805}
806
807
808/**
809 * Implements a 32-bit relative call.
810 *
811 * @param offDisp The displacement offset.
812 */
813IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
814{
815 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
816 uint32_t uOldPC = pCtx->eip + cbInstr;
817 uint32_t uNewPC = uOldPC + offDisp;
818 if (uNewPC > pCtx->cs.u32Limit)
819 return iemRaiseGeneralProtectionFault0(pIemCpu);
820
821 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
822 if (rcStrict != VINF_SUCCESS)
823 return rcStrict;
824
825 pCtx->rip = uNewPC;
826 pCtx->eflags.Bits.u1RF = 0;
827 return VINF_SUCCESS;
828}
829
830
831/**
832 * Implements a 64-bit indirect call.
833 *
834 * @param uNewPC The new program counter (RIP) value (loaded from the
835 * operand).
836 * @param enmEffOpSize The effective operand size.
837 */
838IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
839{
840 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
841 uint64_t uOldPC = pCtx->rip + cbInstr;
842 if (!IEM_IS_CANONICAL(uNewPC))
843 return iemRaiseGeneralProtectionFault0(pIemCpu);
844
845 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
846 if (rcStrict != VINF_SUCCESS)
847 return rcStrict;
848
849 pCtx->rip = uNewPC;
850 pCtx->eflags.Bits.u1RF = 0;
851 return VINF_SUCCESS;
852}
853
854
855/**
856 * Implements a 64-bit relative call.
857 *
858 * @param offDisp The displacement offset.
859 */
860IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
861{
862 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
863 uint64_t uOldPC = pCtx->rip + cbInstr;
864 uint64_t uNewPC = uOldPC + offDisp;
865 if (!IEM_IS_CANONICAL(uNewPC))
866 return iemRaiseNotCanonical(pIemCpu);
867
868 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
869 if (rcStrict != VINF_SUCCESS)
870 return rcStrict;
871
872 pCtx->rip = uNewPC;
873 pCtx->eflags.Bits.u1RF = 0;
874 return VINF_SUCCESS;
875}
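
/* Editor's sketch, not part of the original file: what the canonical-address test used by the
   64-bit call paths above conceptually does, assuming 48-bit linear addresses - bits 63:48 must
   be copies of bit 47. IEM_IS_CANONICAL encapsulates the real check. */
#if 0 /* illustration only */
static bool iemHlpSketchIsCanonical(uint64_t uAddr)
{
    return (uint64_t)((int64_t)(uAddr << 16) >> 16) == uAddr;   /* sign-extend from bit 47 and compare */
}
#endif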
876
877
878/**
879 * Implements far jumps and calls thru task segments (TSS).
880 *
881 * @param uSel The selector.
882 * @param enmBranch The kind of branching we're performing.
883 * @param enmEffOpSize The effective operand size.
884 * @param pDesc The descriptor corresponding to @a uSel. The type is
885 * task segment.
886 */
887IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
888{
889#ifndef IEM_IMPLEMENTS_TASKSWITCH
890 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
891#else
892 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
893 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
894 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
895
896 if ( pDesc->Legacy.Gate.u2Dpl < pIemCpu->uCpl
897 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
898 {
899 Log(("BranchTaskSegment invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
900 pIemCpu->uCpl, (uSel & X86_SEL_RPL)));
901 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
902 }
903
904 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
905 * far calls (see iemCImpl_callf). Most likely in both cases it should be
906 * checked here, need testcases. */
907 if (!pDesc->Legacy.Gen.u1Present)
908 {
909 Log(("BranchTaskSegment TSS not present uSel=%04x -> #NP\n", uSel));
910 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
911 }
912
913 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
914 uint32_t uNextEip = pCtx->eip + cbInstr;
915 return iemTaskSwitch(pIemCpu, pIemCpu->CTX_SUFF(pCtx), enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
916 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSel, pDesc);
917#endif
918}
919
920
921/**
922 * Implements far jumps and calls thru task gates.
923 *
924 * @param uSel The selector.
925 * @param enmBranch The kind of branching we're performing.
926 * @param enmEffOpSize The effective operand size.
927 * @param pDesc The descriptor corresponding to @a uSel. The type is
928 * task gate.
929 */
930IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
931{
932#ifndef IEM_IMPLEMENTS_TASKSWITCH
933 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
934#else
935 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
936
937 if ( pDesc->Legacy.Gate.u2Dpl < pIemCpu->uCpl
938 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
939 {
940 Log(("BranchTaskGate invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
941 pIemCpu->uCpl, (uSel & X86_SEL_RPL)));
942 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
943 }
944
945 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
946 * far calls (see iemCImpl_callf). Most likely in both cases it should be
947 * checked here, need testcases. */
948 if (!pDesc->Legacy.Gen.u1Present)
949 {
950 Log(("BranchTaskSegment segment not present uSel=%04x -> #NP\n", uSel));
951 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
952 }
953
954 /*
955 * Fetch the new TSS descriptor from the GDT.
956 */
957 RTSEL uSelTss = pDesc->Legacy.Gate.u16Sel;
958 if (uSelTss & X86_SEL_LDT)
959 {
960 Log(("BranchTaskGate TSS is in LDT. uSel=%04x uSelTss=%04x -> #GP\n", uSel, uSelTss));
961 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
962 }
963
964 IEMSELDESC TssDesc;
965 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &TssDesc, uSelTss, X86_XCPT_GP);
966 if (rcStrict != VINF_SUCCESS)
967 return rcStrict;
968
969 if (TssDesc.Legacy.Gate.u4Type & X86_SEL_TYPE_SYS_TSS_BUSY_MASK)
970 {
971 Log(("BranchTaskGate TSS is busy. uSel=%04x uSelTss=%04x DescType=%#x -> #GP\n", uSel, uSelTss,
972 TssDesc.Legacy.Gate.u4Type));
973 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
974 }
975
976 if (!TssDesc.Legacy.Gate.u1Present)
977 {
978 Log(("BranchTaskGate TSS is not present. uSel=%04x uSelTss=%04x -> #NP\n", uSel, uSelTss));
979 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSelTss & X86_SEL_MASK_OFF_RPL);
980 }
981
982 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
983 uint32_t uNextEip = pCtx->eip + cbInstr;
984 return iemTaskSwitch(pIemCpu, pIemCpu->CTX_SUFF(pCtx), enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
985 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSelTss, &TssDesc);
986#endif
987}
988
989
990/**
991 * Implements far jumps and calls thru call gates.
992 *
993 * @param uSel The selector.
994 * @param enmBranch The kind of branching we're performing.
995 * @param enmEffOpSize The effective operand size.
996 * @param pDesc The descriptor corresponding to @a uSel. The type is
997 * call gate.
998 */
999IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1000{
1001#ifndef IEM_IMPLEMENTS_CALLGATE
1002 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1003#else
1004 /* NB: Far jumps can only do intra-privilege transfers. Far calls support
1005 * inter-privilege calls and are much more complex.
1006 *
1007 * NB: 64-bit call gate has the same type as a 32-bit call gate! If
1008 * EFER.LMA=1, the gate must be 64-bit. Conversely if EFER.LMA=0, the gate
1009 * must be 16-bit or 32-bit.
1010 */
1011 /** @todo: effective operand size is probably irrelevant here, only the
1012 * call gate bitness matters??
1013 */
1014 VBOXSTRICTRC rcStrict;
1015 RTPTRUNION uPtrRet;
1016 uint64_t uNewRsp;
1017 uint64_t uNewRip;
1018 uint64_t u64Base;
1019 uint32_t cbLimit;
1020 RTSEL uNewCS;
1021 IEMSELDESC DescCS;
1022 PCPUMCTX pCtx;
1023
1024 AssertCompile(X86_SEL_TYPE_SYS_386_CALL_GATE == AMD64_SEL_TYPE_SYS_CALL_GATE);
1025 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1026 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE
1027 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE);
1028
1029 /* Determine the new instruction pointer from the gate descriptor. */
1030 uNewRip = pDesc->Legacy.Gate.u16OffsetLow
1031 | ((uint32_t)pDesc->Legacy.Gate.u16OffsetHigh << 16)
1032 | ((uint64_t)pDesc->Long.Gate.u32OffsetTop << 32);
1033
1034 /* Perform DPL checks on the gate descriptor. */
1035 if ( pDesc->Legacy.Gate.u2Dpl < pIemCpu->uCpl
1036 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1037 {
1038 Log(("BranchCallGate invalid priv. uSel=%04x Gate DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1039 pIemCpu->uCpl, (uSel & X86_SEL_RPL)));
1040 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1041 }
1042
1043 /** @todo does this catch NULL selectors, too? */
1044 if (!pDesc->Legacy.Gen.u1Present)
1045 {
1046 Log(("BranchCallGate Gate not present uSel=%04x -> #NP\n", uSel));
1047 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1048 }
1049
1050 /*
1051 * Fetch the target CS descriptor from the GDT or LDT.
1052 */
1053 uNewCS = pDesc->Legacy.Gate.u16Sel;
1054 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_GP);
1055 if (rcStrict != VINF_SUCCESS)
1056 return rcStrict;
1057
1058 /* Target CS must be a code selector. */
1059 if ( !DescCS.Legacy.Gen.u1DescType
1060 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1061 {
1062 Log(("BranchCallGate %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1063 uNewCS, uNewRip, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
1064 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1065 }
1066
1067 /* Privilege checks on target CS. */
1068 if (enmBranch == IEMBRANCH_JUMP)
1069 {
1070 if (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1071 {
1072 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
1073 {
1074 Log(("BranchCallGate jump (conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1075 uNewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1076 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1077 }
1078 }
1079 else
1080 {
1081 if (DescCS.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
1082 {
1083 Log(("BranchCallGate jump (non-conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1084 uNewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1085 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1086 }
1087 }
1088 }
1089 else
1090 {
1091 Assert(enmBranch == IEMBRANCH_CALL);
1092 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
1093 {
1094 Log(("BranchCallGate call invalid priv. uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1095 uNewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1096 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1097 }
1098 }
1099
1100 /* Additional long mode checks. */
1101 if (IEM_IS_LONG_MODE(pIemCpu))
1102 {
1103 if (!DescCS.Legacy.Gen.u1Long)
1104 {
1105 Log(("BranchCallGate uNewCS %04x -> not a 64-bit code segment.\n", uNewCS));
1106 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1107 }
1108
1109 /* L vs D. */
1110 if ( DescCS.Legacy.Gen.u1Long
1111 && DescCS.Legacy.Gen.u1DefBig)
1112 {
1113 Log(("BranchCallGate uNewCS %04x -> both L and D are set.\n", uNewCS));
1114 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1115 }
1116 }
1117
1118 if (!DescCS.Legacy.Gate.u1Present)
1119 {
1120 Log(("BranchCallGate target CS is not present. uSel=%04x uNewCS=%04x -> #NP(CS)\n", uSel, uNewCS));
1121 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCS);
1122 }
1123
1124 pCtx = pIemCpu->CTX_SUFF(pCtx);
1125
1126 if (enmBranch == IEMBRANCH_JUMP)
1127 {
1128 /** @todo: This is very similar to regular far jumps; merge! */
1129 /* Jumps are fairly simple... */
1130
1131 /* Chop the high bits off if 16-bit gate (Intel says so). */
1132 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1133 uNewRip = (uint16_t)uNewRip;
1134
1135 /* Limit check for non-long segments. */
1136 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1137 if (DescCS.Legacy.Gen.u1Long)
1138 u64Base = 0;
1139 else
1140 {
1141 if (uNewRip > cbLimit)
1142 {
1143 Log(("BranchCallGate jump %04x:%08RX64 -> out of bounds (%#x) -> #GP(0)\n", uNewCS, uNewRip, cbLimit));
1144 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, 0);
1145 }
1146 u64Base = X86DESC_BASE(&DescCS.Legacy);
1147 }
1148
1149 /* Canonical address check. */
1150 if (!IEM_IS_CANONICAL(uNewRip))
1151 {
1152 Log(("BranchCallGate jump %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1153 return iemRaiseNotCanonical(pIemCpu);
1154 }
1155
1156 /*
1157 * Ok, everything checked out fine. Now set the accessed bit before
1158 * committing the result into CS, CSHID and RIP.
1159 */
1160 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1161 {
1162 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
1163 if (rcStrict != VINF_SUCCESS)
1164 return rcStrict;
1165 /** @todo check what VT-x and AMD-V does. */
1166 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1167 }
1168
1169 /* commit */
1170 pCtx->rip = uNewRip;
1171 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1172 pCtx->cs.Sel |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1173 pCtx->cs.ValidSel = pCtx->cs.Sel;
1174 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1175 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1176 pCtx->cs.u32Limit = cbLimit;
1177 pCtx->cs.u64Base = u64Base;
1178 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
1179 }
1180 else
1181 {
1182 Assert(enmBranch == IEMBRANCH_CALL);
1183 /* Calls are much more complicated. */
1184
1185 if (DescCS.Legacy.Gen.u2Dpl < pIemCpu->uCpl)
1186 {
1187 uint16_t offNewStack; /* Offset of new stack in TSS. */
1188 uint16_t cbNewStack; /* Number of bytes the stack information takes up in TSS. */
1189 uint8_t uNewCSDpl;
1190 uint8_t cbWords;
1191 RTSEL uNewSS;
1192 RTSEL uOldSS;
1193 uint64_t uOldRsp;
1194 IEMSELDESC DescSS;
1195 RTPTRUNION uPtrTSS;
1196 RTGCPTR GCPtrTSS;
1197 RTPTRUNION uPtrParmWds;
1198 RTGCPTR GCPtrParmWds;
1199
1200 /* More privilege. This is the fun part. */
1201 Assert(!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)); /* Filtered out above. */
1202
1203 /*
1204 * Determine new SS:rSP from the TSS.
1205 */
1206 Assert(!pCtx->tr.Attr.n.u1DescType);
1207
1208 /* Figure out where the new stack pointer is stored in the TSS. */
1209 uNewCSDpl = uNewCS & X86_SEL_RPL;
1210 if (!IEM_IS_LONG_MODE(pIemCpu))
1211 {
1212 if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1213 {
1214 offNewStack = RT_OFFSETOF(X86TSS32, esp0) + uNewCSDpl * 8;
1215 cbNewStack = RT_SIZEOFMEMB(X86TSS32, esp0) + RT_SIZEOFMEMB(X86TSS32, ss0);
1216 }
1217 else
1218 {
1219 Assert(pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1220 offNewStack = RT_OFFSETOF(X86TSS16, sp0) + uNewCSDpl * 4;
1221 cbNewStack = RT_SIZEOFMEMB(X86TSS16, sp0) + RT_SIZEOFMEMB(X86TSS16, ss0);
1222 }
1223 }
1224 else
1225 {
1226 Assert(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1227 offNewStack = RT_OFFSETOF(X86TSS64, rsp0) + uNewCSDpl * RT_SIZEOFMEMB(X86TSS64, rsp0);
1228 cbNewStack = RT_SIZEOFMEMB(X86TSS64, rsp0);
1229 }
1230
1231 /* Check against TSS limit. */
1232 if ((uint16_t)(offNewStack + cbNewStack - 1) > pCtx->tr.u32Limit)
1233 {
1234 Log(("BranchCallGate inner stack past TSS limit - %u > %u -> #TS(TSS)\n", offNewStack + cbNewStack - 1, pCtx->tr.u32Limit));
1235 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, pCtx->tr.Sel);
1236 }
1237
1238 GCPtrTSS = pCtx->tr.u64Base + offNewStack;
1239 rcStrict = iemMemMap(pIemCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R);
1240 if (rcStrict != VINF_SUCCESS)
1241 {
1242 Log(("BranchCallGate: TSS mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1243 return rcStrict;
1244 }
1245
1246 if (!IEM_IS_LONG_MODE(pIemCpu))
1247 {
1248 if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1249 {
1250 uNewRsp = uPtrTSS.pu32[0];
1251 uNewSS = uPtrTSS.pu16[2];
1252 }
1253 else
1254 {
1255 Assert(pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1256 uNewRsp = uPtrTSS.pu16[0];
1257 uNewSS = uPtrTSS.pu16[1];
1258 }
1259 }
1260 else
1261 {
1262 Assert(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1263 /* SS will be a NULL selector, but that's valid. */
1264 uNewRsp = uPtrTSS.pu64[0];
1265 uNewSS = uNewCSDpl;
1266 }
1267
1268 /* Done with the TSS now. */
1269 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtrTSS.pv, IEM_ACCESS_SYS_R);
1270 if (rcStrict != VINF_SUCCESS)
1271 {
1272 Log(("BranchCallGate: TSS unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1273 return rcStrict;
1274 }
1275
1276 /* Only used outside of long mode. */
1277 cbWords = pDesc->Legacy.Gate.u4ParmCount;
1278
1279 /* If EFER.LMA is 0, there's extra work to do. */
1280 if (!IEM_IS_LONG_MODE(pIemCpu))
1281 {
1282 if ((uNewSS & X86_SEL_MASK_OFF_RPL) == 0)
1283 {
1284 Log(("BranchCallGate new SS NULL -> #TS(NewSS)\n"));
1285 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uNewSS);
1286 }
1287
1288 /* Grab the new SS descriptor. */
1289 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_SS);
1290 if (rcStrict != VINF_SUCCESS)
1291 return rcStrict;
1292
1293 /* Ensure that CS.DPL == SS.RPL == SS.DPL. */
1294 if ( (DescCS.Legacy.Gen.u2Dpl != (uNewSS & X86_SEL_RPL))
1295 || (DescCS.Legacy.Gen.u2Dpl != DescSS.Legacy.Gen.u2Dpl))
1296 {
1297 Log(("BranchCallGate call bad RPL/DPL uNewSS=%04x SS DPL=%d CS DPL=%u -> #TS(NewSS)\n",
1298 uNewSS, DescCS.Legacy.Gen.u2Dpl, DescCS.Legacy.Gen.u2Dpl));
1299 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uNewSS);
1300 }
1301
1302 /* Ensure new SS is a writable data segment. */
1303 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
1304 {
1305 Log(("BranchCallGate call new SS -> not a writable data selector (u4Type=%#x)\n", DescSS.Legacy.Gen.u4Type));
1306 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uNewSS);
1307 }
1308
1309 if (!DescSS.Legacy.Gen.u1Present)
1310 {
1311 Log(("BranchCallGate New stack not present uSel=%04x -> #SS(NewSS)\n", uNewSS));
1312 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
1313 }
1314 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1315 cbNewStack = (uint16_t)sizeof(uint32_t) * (4 + cbWords);
1316 else
1317 cbNewStack = (uint16_t)sizeof(uint16_t) * (4 + cbWords);
1318 }
1319 else
1320 {
1321 /* Just grab the new (NULL) SS descriptor. */
1322 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_SS);
1323 if (rcStrict != VINF_SUCCESS)
1324 return rcStrict;
1325
1326 cbNewStack = sizeof(uint64_t) * 4;
1327 }
1328
1329 /** @todo: According to Intel, new stack is checked for enough space first,
1330 * then switched. According to AMD, the stack is switched first and
1331 * then pushes might fault!
1332 */
1333
1334 /** @todo: According to AMD, CS is loaded first, then SS.
1335 * According to Intel, it's the other way around!?
1336 */
1337
1338 /** @todo: Intel and AMD disagree on when exactly the CPL changes! */
1339
1340 /* Set the accessed bit before committing new SS. */
1341 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1342 {
1343 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
1344 if (rcStrict != VINF_SUCCESS)
1345 return rcStrict;
1346 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1347 }
1348
1349 /* Remember the old SS:rSP and their linear address. */
1350 uOldSS = pCtx->ss.Sel;
1351 uOldRsp = pCtx->rsp;
1352
1353 GCPtrParmWds = pCtx->ss.u64Base + pCtx->rsp;
1354
1355 /* Commit new SS:rSP. */
1356 pCtx->ss.Sel = uNewSS;
1357 pCtx->ss.ValidSel = uNewSS;
1358 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
1359 pCtx->ss.u32Limit = X86DESC_LIMIT_G(&DescSS.Legacy);
1360 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
1361 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1362 pCtx->rsp = uNewRsp;
1363 pIemCpu->uCpl = uNewCSDpl;
1364 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
1365 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
1366
1367 /* Check new stack - may #SS(NewSS). */
1368 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbNewStack,
1369 &uPtrRet.pv, &uNewRsp);
1370 if (rcStrict != VINF_SUCCESS)
1371 {
1372 Log(("BranchCallGate: New stack mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1373 return rcStrict;
1374 }
1375
1376 if (!IEM_IS_LONG_MODE(pIemCpu))
1377 {
1378 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1379 {
1380 /* Push the old CS:rIP. */
1381 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1382 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1383
1384 /* Map the relevant chunk of the old stack. */
1385 rcStrict = iemMemMap(pIemCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
1386 if (rcStrict != VINF_SUCCESS)
1387 {
1388 Log(("BranchCallGate: Old stack mapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1389 return rcStrict;
1390 }
1391
1392 /* Copy the parameter (d)words. */
1393 for (int i = 0; i < cbWords; ++i)
1394 uPtrRet.pu32[2 + i] = uPtrParmWds.pu32[i];
1395
1396 /* Unmap the old stack. */
1397 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1398 if (rcStrict != VINF_SUCCESS)
1399 {
1400 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1401 return rcStrict;
1402 }
1403
1404 /* Push the old SS:rSP. */
1405 uPtrRet.pu32[2 + cbWords + 0] = uOldRsp;
1406 uPtrRet.pu32[2 + cbWords + 1] = uOldSS;
1407 }
1408 else
1409 {
1410 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1411
1412 /* Push the old CS:rIP. */
1413 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1414 uPtrRet.pu16[1] = pCtx->cs.Sel;
1415
1416 /* Map the relevant chunk of the old stack. */
1417 rcStrict = iemMemMap(pIemCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
1418 if (rcStrict != VINF_SUCCESS)
1419 {
1420 Log(("BranchCallGate: Old stack mapping (16-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1421 return rcStrict;
1422 }
1423
1424 /* Copy the parameter words. */
1425 for (int i = 0; i < cbWords; ++i)
1426 uPtrRet.pu16[2 + i] = uPtrParmWds.pu16[i];
1427
1428 /* Unmap the old stack. */
1429 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1430 if (rcStrict != VINF_SUCCESS)
1431 {
1432 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1433 return rcStrict;
1434 }
1435
1436 /* Push the old SS:rSP. */
1437 uPtrRet.pu16[2 + cbWords + 0] = uOldRsp;
1438 uPtrRet.pu16[2 + cbWords + 1] = uOldSS;
1439 }
1440 }
1441 else
1442 {
1443 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1444
1445 /* For 64-bit gates, no parameters are copied. Just push old SS:rSP and CS:rIP. */
1446 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1447 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1448 uPtrRet.pu64[2] = uOldRsp;
1449 uPtrRet.pu64[3] = uOldSS; /** @todo Testcase: What is written to the high words when pushing SS? */
1450 }
1451
1452 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1453 if (rcStrict != VINF_SUCCESS)
1454 {
1455 Log(("BranchCallGate: New stack unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1456 return rcStrict;
1457 }
1458
1459 /* Chop the high bits off if 16-bit gate (Intel says so). */
1460 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1461 uNewRip = (uint16_t)uNewRip;
1462
1463 /* Limit / canonical check. */
1464 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1465 if (!IEM_IS_LONG_MODE(pIemCpu))
1466 {
1467 if (uNewRip > cbLimit)
1468 {
1469 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1470 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, 0);
1471 }
1472 u64Base = X86DESC_BASE(&DescCS.Legacy);
1473 }
1474 else
1475 {
1476 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1477 if (!IEM_IS_CANONICAL(uNewRip))
1478 {
1479 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1480 return iemRaiseNotCanonical(pIemCpu);
1481 }
1482 u64Base = 0;
1483 }
1484
1485 /*
1486 * Now set the accessed bit before
1487 * writing the return address to the stack and committing the result into
1488 * CS, CSHID and RIP.
1489 */
1490 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1491 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1492 {
1493 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
1494 if (rcStrict != VINF_SUCCESS)
1495 return rcStrict;
1496 /** @todo check what VT-x and AMD-V does. */
1497 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1498 }
1499
1500 /* Commit new CS:rIP. */
1501 pCtx->rip = uNewRip;
1502 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1503 pCtx->cs.Sel |= pIemCpu->uCpl;
1504 pCtx->cs.ValidSel = pCtx->cs.Sel;
1505 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1506 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1507 pCtx->cs.u32Limit = cbLimit;
1508 pCtx->cs.u64Base = u64Base;
1509 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
1510 }
1511 else
1512 {
1513 /* Same privilege. */
1514 /** @todo: This is very similar to regular far calls; merge! */
1515
1516 /* Check stack first - may #SS(0). */
1517 /** @todo check how gate size affects pushing of CS! Does callf 16:32 in
1518 * 16-bit code cause a two or four byte CS to be pushed? */
1519 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1520 IEM_IS_LONG_MODE(pIemCpu) ? 8+8
1521 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 4+4 : 2+2,
1522 &uPtrRet.pv, &uNewRsp);
1523 if (rcStrict != VINF_SUCCESS)
1524 return rcStrict;
1525
1526 /* Chop the high bits off if 16-bit gate (Intel says so). */
1527 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1528 uNewRip = (uint16_t)uNewRip;
1529
1530 /* Limit / canonical check. */
1531 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1532 if (!IEM_IS_LONG_MODE(pIemCpu))
1533 {
1534 if (uNewRip > cbLimit)
1535 {
1536 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1537 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, 0);
1538 }
1539 u64Base = X86DESC_BASE(&DescCS.Legacy);
1540 }
1541 else
1542 {
1543 if (!IEM_IS_CANONICAL(uNewRip))
1544 {
1545 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1546 return iemRaiseNotCanonical(pIemCpu);
1547 }
1548 u64Base = 0;
1549 }
1550
1551 /*
1552 * Now set the accessed bit before
1553 * writing the return address to the stack and committing the result into
1554 * CS, CSHID and RIP.
1555 */
1556 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1557 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1558 {
1559 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
1560 if (rcStrict != VINF_SUCCESS)
1561 return rcStrict;
1562 /** @todo check what VT-x and AMD-V does. */
1563 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1564 }
1565
1566 /* stack */
1567 if (!IEM_IS_LONG_MODE(pIemCpu))
1568 {
1569 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1570 {
1571 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1572 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1573 }
1574 else
1575 {
1576 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1577 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1578 uPtrRet.pu16[1] = pCtx->cs.Sel;
1579 }
1580 }
1581 else
1582 {
1583 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1584 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1585 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1586 }
1587
1588 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1589 if (rcStrict != VINF_SUCCESS)
1590 return rcStrict;
1591
1592 /* commit */
1593 pCtx->rip = uNewRip;
1594 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1595 pCtx->cs.Sel |= pIemCpu->uCpl;
1596 pCtx->cs.ValidSel = pCtx->cs.Sel;
1597 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1598 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1599 pCtx->cs.u32Limit = cbLimit;
1600 pCtx->cs.u64Base = u64Base;
1601 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
1602 }
1603 }
1604 pCtx->eflags.Bits.u1RF = 0;
1605 return VINF_SUCCESS;
1606#endif
1607}
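
/* Editor's sketch, not part of the original file: the frame the more-privileged path above builds
   on the new stack for a 32-bit call gate with N parameter dwords (64-bit gates copy no parameters
   and push only CS:RIP and SS:RSP). Dword offsets from the new ESP: [0] return EIP, [1] old CS,
   [2..N+1] copied parameters, [N+2] old ESP, [N+3] old SS. The helper name is made up. */
#if 0 /* illustration only */
static unsigned iemHlpSketchGate32FrameDwords(unsigned cParmWords)
{
    return 4 + cParmWords;  /* matches cbNewStack = sizeof(uint32_t) * (4 + cbWords) above */
}
#endif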
1608
1609
1610/**
1611 * Implements far jumps and calls thru system selectors.
1612 *
1613 * @param uSel The selector.
1614 * @param enmBranch The kind of branching we're performing.
1615 * @param enmEffOpSize The effective operand size.
1616 * @param pDesc The descriptor corresponding to @a uSel.
1617 */
1618IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1619{
1620 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1621 Assert((uSel & X86_SEL_MASK_OFF_RPL));
1622
1623 if (IEM_IS_LONG_MODE(pIemCpu))
1624 switch (pDesc->Legacy.Gen.u4Type)
1625 {
1626 case AMD64_SEL_TYPE_SYS_CALL_GATE:
1627 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
1628
1629 default:
1630 case AMD64_SEL_TYPE_SYS_LDT:
1631 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
1632 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
1633 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
1634 case AMD64_SEL_TYPE_SYS_INT_GATE:
1635 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1636 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1637 }
1638
1639 switch (pDesc->Legacy.Gen.u4Type)
1640 {
1641 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1642 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1643 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
1644
1645 case X86_SEL_TYPE_SYS_TASK_GATE:
1646 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
1647
1648 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1649 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1650 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
1651
1652 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1653 Log(("branch %04x -> busy 286 TSS\n", uSel));
1654 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1655
1656 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1657 Log(("branch %04x -> busy 386 TSS\n", uSel));
1658 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1659
1660 default:
1661 case X86_SEL_TYPE_SYS_LDT:
1662 case X86_SEL_TYPE_SYS_286_INT_GATE:
1663 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1664 case X86_SEL_TYPE_SYS_386_INT_GATE:
1665 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1666 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1667 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1668 }
1669}
1670
1671
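/*
 * For reference: the privilege rules applied by the far-branch checks below
 * (summarizing the SDM; see the checks themselves for the exact behaviour):
 *  - conforming code segment:     DPL <= CPL, the selector RPL is ignored.
 *  - non-conforming code segment: RPL <= CPL and DPL == CPL.
 * A far JMP/CALL straight to a code segment never changes the CPL.
 */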
1672/**
1673 * Implements far jumps.
1674 *
1675 * @param uSel The selector.
1676 * @param offSeg The segment offset.
1677 * @param enmEffOpSize The effective operand size.
1678 */
1679IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1680{
1681 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1682 NOREF(cbInstr);
1683 Assert(offSeg <= UINT32_MAX);
1684
1685 /*
1686 * Real mode and V8086 mode are easy. The only snag seems to be that
1687 * CS.limit doesn't change and the limit check is done against the current
1688 * limit.
1689 */
1690 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1691 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1692 {
1693 if (offSeg > pCtx->cs.u32Limit)
1694 return iemRaiseGeneralProtectionFault0(pIemCpu);
1695
1696 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
1697 pCtx->rip = offSeg;
1698 else
1699 pCtx->rip = offSeg & UINT16_MAX;
1700 pCtx->cs.Sel = uSel;
1701 pCtx->cs.ValidSel = uSel;
1702 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1703 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1704 pCtx->eflags.Bits.u1RF = 0;
1705 return VINF_SUCCESS;
1706 }
1707
1708 /*
1709 * Protected mode. Need to parse the specified descriptor...
1710 */
1711 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1712 {
1713 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1714 return iemRaiseGeneralProtectionFault0(pIemCpu);
1715 }
1716
1717 /* Fetch the descriptor. */
1718 IEMSELDESC Desc;
1719 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP);
1720 if (rcStrict != VINF_SUCCESS)
1721 return rcStrict;
1722
1723 /* Is it there? */
1724 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
1725 {
1726 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1727 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1728 }
1729
1730 /*
1731 * Deal with it according to its type. We do the standard code selectors
1732 * here and dispatch the system selectors to worker functions.
1733 */
1734 if (!Desc.Legacy.Gen.u1DescType)
1735 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
1736
1737 /* Only code segments. */
1738 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1739 {
1740 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1741 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1742 }
1743
1744 /* L vs D. */
1745 if ( Desc.Legacy.Gen.u1Long
1746 && Desc.Legacy.Gen.u1DefBig
1747 && IEM_IS_LONG_MODE(pIemCpu))
1748 {
1749 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1750 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1751 }
1752
1753    /* DPL/RPL/CPL check, where conforming segments make a difference. */
1754 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1755 {
1756 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1757 {
1758 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1759 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1760 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1761 }
1762 }
1763 else
1764 {
1765 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1766 {
1767 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1768 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1769 }
1770 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1771 {
1772 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1773 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1774 }
1775 }
1776
1777 /* Chop the high bits if 16-bit (Intel says so). */
1778 if (enmEffOpSize == IEMMODE_16BIT)
1779 offSeg &= UINT16_MAX;
1780
1781 /* Limit check. (Should alternatively check for non-canonical addresses
1782 here, but that is ruled out by offSeg being 32-bit, right?) */
1783 uint64_t u64Base;
1784 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1785 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1786 u64Base = 0;
1787 else
1788 {
1789 if (offSeg > cbLimit)
1790 {
1791 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1792 /** @todo: Intel says this is #GP(0)! */
1793 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1794 }
1795 u64Base = X86DESC_BASE(&Desc.Legacy);
1796 }
1797
1798 /*
1799 * Ok, everything checked out fine. Now set the accessed bit before
1800 * committing the result into CS, CSHID and RIP.
1801 */
1802 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1803 {
1804 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1805 if (rcStrict != VINF_SUCCESS)
1806 return rcStrict;
1807 /** @todo check what VT-x and AMD-V does. */
1808 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1809 }
1810
1811 /* commit */
1812 pCtx->rip = offSeg;
1813 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1814 pCtx->cs.Sel |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1815 pCtx->cs.ValidSel = pCtx->cs.Sel;
1816 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1817 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1818 pCtx->cs.u32Limit = cbLimit;
1819 pCtx->cs.u64Base = u64Base;
1820 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
1821 pCtx->eflags.Bits.u1RF = 0;
1822 /** @todo check if the hidden bits are loaded correctly for 64-bit
1823 * mode. */
1824 return VINF_SUCCESS;
1825}
1826
1827
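/*
 * Note: in real and V8086 mode the far call below pushes CS and then the
 * return IP, so the frame at the new SP looks like this (lowest address
 * first; a sketch derived from the code rather than from a testcase):
 *      16-bit operand size:  [IP : word ][CS : word]
 *      32-bit operand size:  [EIP: dword][CS in the low word of the next slot]
 * The retf implementation further down pops the selector from that low word.
 */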
1828/**
1829 * Implements far calls.
1830 *
1831 * This is very similar to iemCImpl_FarJmp.
1832 *
1833 * @param uSel The selector.
1834 * @param offSeg The segment offset.
1835 * @param enmEffOpSize The operand size (in case we need it).
1836 */
1837IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1838{
1839 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1840 VBOXSTRICTRC rcStrict;
1841 uint64_t uNewRsp;
1842 RTPTRUNION uPtrRet;
1843
1844 /*
1845 * Real mode and V8086 mode are easy. The only snag seems to be that
1846 * CS.limit doesn't change and the limit check is done against the current
1847 * limit.
1848 */
1849 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1850 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1851 {
1852 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1853
1854 /* Check stack first - may #SS(0). */
1855 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,
1856 &uPtrRet.pv, &uNewRsp);
1857 if (rcStrict != VINF_SUCCESS)
1858 return rcStrict;
1859
1860 /* Check the target address range. */
1861 if (offSeg > UINT32_MAX)
1862 return iemRaiseGeneralProtectionFault0(pIemCpu);
1863
1864 /* Everything is fine, push the return address. */
1865 if (enmEffOpSize == IEMMODE_16BIT)
1866 {
1867 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1868 uPtrRet.pu16[1] = pCtx->cs.Sel;
1869 }
1870 else
1871 {
1872 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1873            uPtrRet.pu16[2] = pCtx->cs.Sel;
1874 }
1875 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1876 if (rcStrict != VINF_SUCCESS)
1877 return rcStrict;
1878
1879 /* Branch. */
1880 pCtx->rip = offSeg;
1881 pCtx->cs.Sel = uSel;
1882 pCtx->cs.ValidSel = uSel;
1883 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1884 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1885 pCtx->eflags.Bits.u1RF = 0;
1886 return VINF_SUCCESS;
1887 }
1888
1889 /*
1890 * Protected mode. Need to parse the specified descriptor...
1891 */
1892 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1893 {
1894 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1895 return iemRaiseGeneralProtectionFault0(pIemCpu);
1896 }
1897
1898 /* Fetch the descriptor. */
1899 IEMSELDESC Desc;
1900 rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP);
1901 if (rcStrict != VINF_SUCCESS)
1902 return rcStrict;
1903
1904 /*
1905 * Deal with it according to its type. We do the standard code selectors
1906 * here and dispatch the system selectors to worker functions.
1907 */
1908 if (!Desc.Legacy.Gen.u1DescType)
1909 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
1910
1911 /* Only code segments. */
1912 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1913 {
1914 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1915 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1916 }
1917
1918 /* L vs D. */
1919 if ( Desc.Legacy.Gen.u1Long
1920 && Desc.Legacy.Gen.u1DefBig
1921 && IEM_IS_LONG_MODE(pIemCpu))
1922 {
1923 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1924 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1925 }
1926
1927    /* DPL/RPL/CPL check, where conforming segments make a difference. */
1928 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1929 {
1930 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1931 {
1932 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1933 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1934 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1935 }
1936 }
1937 else
1938 {
1939 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1940 {
1941 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1942 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1943 }
1944 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1945 {
1946 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1947 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1948 }
1949 }
1950
1951 /* Is it there? */
1952 if (!Desc.Legacy.Gen.u1Present)
1953 {
1954 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1955 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1956 }
1957
1958 /* Check stack first - may #SS(0). */
1959 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
1960 * 16-bit code cause a two or four byte CS to be pushed? */
1961 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1962 enmEffOpSize == IEMMODE_64BIT ? 8+8
1963 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
1964 &uPtrRet.pv, &uNewRsp);
1965 if (rcStrict != VINF_SUCCESS)
1966 return rcStrict;
1967
1968 /* Chop the high bits if 16-bit (Intel says so). */
1969 if (enmEffOpSize == IEMMODE_16BIT)
1970 offSeg &= UINT16_MAX;
1971
1972 /* Limit / canonical check. */
1973 uint64_t u64Base;
1974 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1975 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1976 {
1977 if (!IEM_IS_CANONICAL(offSeg))
1978 {
1979 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
1980 return iemRaiseNotCanonical(pIemCpu);
1981 }
1982 u64Base = 0;
1983 }
1984 else
1985 {
1986 if (offSeg > cbLimit)
1987 {
1988 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1989 /** @todo: Intel says this is #GP(0)! */
1990 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1991 }
1992 u64Base = X86DESC_BASE(&Desc.Legacy);
1993 }
1994
1995 /*
1996 * Now set the accessed bit before
1997 * writing the return address to the stack and committing the result into
1998 * CS, CSHID and RIP.
1999 */
2000 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2001 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2002 {
2003 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2004 if (rcStrict != VINF_SUCCESS)
2005 return rcStrict;
2006 /** @todo check what VT-x and AMD-V does. */
2007 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2008 }
2009
2010 /* stack */
2011 if (enmEffOpSize == IEMMODE_16BIT)
2012 {
2013 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
2014 uPtrRet.pu16[1] = pCtx->cs.Sel;
2015 }
2016 else if (enmEffOpSize == IEMMODE_32BIT)
2017 {
2018 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
2019 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
2020 }
2021 else
2022 {
2023 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
2024 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
2025 }
2026 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
2027 if (rcStrict != VINF_SUCCESS)
2028 return rcStrict;
2029
2030 /* commit */
2031 pCtx->rip = offSeg;
2032 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
2033 pCtx->cs.Sel |= pIemCpu->uCpl;
2034 pCtx->cs.ValidSel = pCtx->cs.Sel;
2035 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2036 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2037 pCtx->cs.u32Limit = cbLimit;
2038 pCtx->cs.u64Base = u64Base;
2039 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
2040 pCtx->eflags.Bits.u1RF = 0;
2041 /** @todo check if the hidden bits are loaded correctly for 64-bit
2042 * mode. */
2043 return VINF_SUCCESS;
2044}
2045
2046
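/*
 * Stack layout note: the far return frame popped below, with the slot width
 * given by the operand size:
 *      [xSP + 0]       return IP/EIP/RIP
 *      [xSP + cbSlot]  return CS (in the low word of the slot)
 * followed by cbPop parameter bytes (the retf imm16) and, when returning to
 * an outer privilege level, by the outer xSP and SS slots that were pushed
 * when the inner level was entered through a call gate.
 */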
2047/**
2048 * Implements retf.
2049 *
2050 * @param enmEffOpSize The effective operand size.
2051 * @param cbPop The number of argument bytes to pop from the
2052 * stack.
2053 */
2054IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2055{
2056 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2057 VBOXSTRICTRC rcStrict;
2058 RTCPTRUNION uPtrFrame;
2059 uint64_t uNewRsp;
2060 uint64_t uNewRip;
2061 uint16_t uNewCs;
2062 NOREF(cbInstr);
2063
2064 /*
2065 * Read the stack values first.
2066 */
2067 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
2068 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
2069 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
2070 if (rcStrict != VINF_SUCCESS)
2071 return rcStrict;
2072 if (enmEffOpSize == IEMMODE_16BIT)
2073 {
2074 uNewRip = uPtrFrame.pu16[0];
2075 uNewCs = uPtrFrame.pu16[1];
2076 }
2077 else if (enmEffOpSize == IEMMODE_32BIT)
2078 {
2079 uNewRip = uPtrFrame.pu32[0];
2080 uNewCs = uPtrFrame.pu16[2];
2081 }
2082 else
2083 {
2084 uNewRip = uPtrFrame.pu64[0];
2085 uNewCs = uPtrFrame.pu16[4];
2086 }
2087
2088 /*
2089 * Real mode and V8086 mode are easy.
2090 */
2091 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2092 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2093 {
2094 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2095 /** @todo check how this is supposed to work if sp=0xfffe. */
2096
2097 /* Check the limit of the new EIP. */
2098 /** @todo Intel pseudo code only does the limit check for 16-bit
2099 * operands; AMD does not make any distinction. What is right? */
2100 if (uNewRip > pCtx->cs.u32Limit)
2101 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2102
2103 /* commit the operation. */
2104 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
2105 if (rcStrict != VINF_SUCCESS)
2106 return rcStrict;
2107 pCtx->rip = uNewRip;
2108 pCtx->cs.Sel = uNewCs;
2109 pCtx->cs.ValidSel = uNewCs;
2110 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2111 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
2112 pCtx->eflags.Bits.u1RF = 0;
2113 /** @todo do we load attribs and limit as well? */
2114 if (cbPop)
2115 iemRegAddToRsp(pIemCpu, pCtx, cbPop);
2116 return VINF_SUCCESS;
2117 }
2118
2119 /*
2120 * Protected mode is complicated, of course.
2121 */
2122 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2123 {
2124 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
2125 return iemRaiseGeneralProtectionFault0(pIemCpu);
2126 }
2127
2128 /* Fetch the descriptor. */
2129 IEMSELDESC DescCs;
2130 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCs, uNewCs, X86_XCPT_GP);
2131 if (rcStrict != VINF_SUCCESS)
2132 return rcStrict;
2133
2134 /* Can only return to a code selector. */
2135 if ( !DescCs.Legacy.Gen.u1DescType
2136 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
2137 {
2138 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
2139 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
2140 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2141 }
2142
2143 /* L vs D. */
2144 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
2145 && DescCs.Legacy.Gen.u1DefBig
2146 && IEM_IS_LONG_MODE(pIemCpu))
2147 {
2148 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
2149 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2150 }
2151
2152 /* DPL/RPL/CPL checks. */
2153 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
2154 {
2155 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pIemCpu->uCpl));
2156 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2157 }
2158
2159 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2160 {
2161 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
2162 {
2163 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
2164 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2165 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2166 }
2167 }
2168 else
2169 {
2170 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
2171 {
2172 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
2173 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2174 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2175 }
2176 }
2177
2178 /* Is it there? */
2179 if (!DescCs.Legacy.Gen.u1Present)
2180 {
2181 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
2182 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2183 }
2184
2185 /*
2186 * Return to outer privilege? (We'll typically have entered via a call gate.)
2187 */
2188 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
2189 {
2190 /* Read the outer stack pointer stored *after* the parameters. */
2191 RTCPTRUNION uPtrStack;
2192 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, cbPop + cbRetPtr, &uPtrStack.pv, &uNewRsp);
2193 if (rcStrict != VINF_SUCCESS)
2194 return rcStrict;
2195
2196 uPtrStack.pu8 += cbPop; /* Skip the parameters. */
2197
2198 uint16_t uNewOuterSs;
2199 uint64_t uNewOuterRsp;
2200 if (enmEffOpSize == IEMMODE_16BIT)
2201 {
2202 uNewOuterRsp = uPtrStack.pu16[0];
2203 uNewOuterSs = uPtrStack.pu16[1];
2204 }
2205 else if (enmEffOpSize == IEMMODE_32BIT)
2206 {
2207 uNewOuterRsp = uPtrStack.pu32[0];
2208 uNewOuterSs = uPtrStack.pu16[2];
2209 }
2210 else
2211 {
2212 uNewOuterRsp = uPtrStack.pu64[0];
2213 uNewOuterSs = uPtrStack.pu16[4];
2214 }
2215
2216 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
2217 and read the selector. */
2218 IEMSELDESC DescSs;
2219 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
2220 {
2221 if ( !DescCs.Legacy.Gen.u1Long
2222 || (uNewOuterSs & X86_SEL_RPL) == 3)
2223 {
2224 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
2225 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2226 return iemRaiseGeneralProtectionFault0(pIemCpu);
2227 }
2228 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
2229 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
2230 }
2231 else
2232 {
2233 /* Fetch the descriptor for the new stack segment. */
2234 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSs, uNewOuterSs, X86_XCPT_GP);
2235 if (rcStrict != VINF_SUCCESS)
2236 return rcStrict;
2237 }
2238
2239 /* Check that RPL of stack and code selectors match. */
2240 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
2241 {
2242 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2243 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
2244 }
2245
2246 /* Must be a writable data segment. */
2247 if ( !DescSs.Legacy.Gen.u1DescType
2248 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2249 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2250 {
2251 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
2252 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
2253 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
2254 }
2255
2256 /* L vs D. (Not mentioned by intel.) */
2257 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
2258 && DescSs.Legacy.Gen.u1DefBig
2259 && IEM_IS_LONG_MODE(pIemCpu))
2260 {
2261 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
2262 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2263 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
2264 }
2265
2266 /* DPL/RPL/CPL checks. */
2267 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2268 {
2269 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
2270 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
2271 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
2272 }
2273
2274 /* Is it there? */
2275 if (!DescSs.Legacy.Gen.u1Present)
2276 {
2277 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2278 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2279 }
2280
2281 /* Calc SS limit.*/
2282 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
2283
2284 /* Is RIP canonical or within CS.limit? */
2285 uint64_t u64Base;
2286 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2287
2288 /** @todo Testcase: Is this correct? */
2289 if ( DescCs.Legacy.Gen.u1Long
2290 && IEM_IS_LONG_MODE(pIemCpu) )
2291 {
2292 if (!IEM_IS_CANONICAL(uNewRip))
2293 {
2294 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2295 return iemRaiseNotCanonical(pIemCpu);
2296 }
2297 u64Base = 0;
2298 }
2299 else
2300 {
2301 if (uNewRip > cbLimitCs)
2302 {
2303 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
2304 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
2305 /** @todo: Intel says this is #GP(0)! */
2306 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2307 }
2308 u64Base = X86DESC_BASE(&DescCs.Legacy);
2309 }
2310
2311 /*
2312 * Now set the accessed bit before
2313 * writing the return address to the stack and committing the result into
2314 * CS, CSHID and RIP.
2315 */
2316 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
2317 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2318 {
2319 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2320 if (rcStrict != VINF_SUCCESS)
2321 return rcStrict;
2322 /** @todo check what VT-x and AMD-V does. */
2323 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2324 }
2325 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
2326 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2327 {
2328 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewOuterSs);
2329 if (rcStrict != VINF_SUCCESS)
2330 return rcStrict;
2331 /** @todo check what VT-x and AMD-V does. */
2332 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2333 }
2334
2335 /* commit */
2336 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
2337 if (rcStrict != VINF_SUCCESS)
2338 return rcStrict;
2339 if (enmEffOpSize == IEMMODE_16BIT)
2340 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2341 else
2342 pCtx->rip = uNewRip;
2343 pCtx->cs.Sel = uNewCs;
2344 pCtx->cs.ValidSel = uNewCs;
2345 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2346 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2347 pCtx->cs.u32Limit = cbLimitCs;
2348 pCtx->cs.u64Base = u64Base;
2349 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
2350 pCtx->rsp = uNewOuterRsp;
2351 pCtx->ss.Sel = uNewOuterSs;
2352 pCtx->ss.ValidSel = uNewOuterSs;
2353 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2354 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
2355 pCtx->ss.u32Limit = cbLimitSs;
2356 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2357 pCtx->ss.u64Base = 0;
2358 else
2359 pCtx->ss.u64Base = X86DESC_BASE(&DescSs.Legacy);
2360
2361 pIemCpu->uCpl = (uNewCs & X86_SEL_RPL);
2362 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
2363 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
2364 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
2365 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
2366
2367 /** @todo check if the hidden bits are loaded correctly for 64-bit
2368 * mode. */
2369
2370 if (cbPop)
2371 iemRegAddToRsp(pIemCpu, pCtx, cbPop);
2372 pCtx->eflags.Bits.u1RF = 0;
2373
2374 /* Done! */
2375 }
2376 /*
2377 * Return to the same privilege level
2378 */
2379 else
2380 {
2381 /* Limit / canonical check. */
2382 uint64_t u64Base;
2383 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2384
2385 /** @todo Testcase: Is this correct? */
2386 if ( DescCs.Legacy.Gen.u1Long
2387 && IEM_IS_LONG_MODE(pIemCpu) )
2388 {
2389 if (!IEM_IS_CANONICAL(uNewRip))
2390 {
2391 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
2392 return iemRaiseNotCanonical(pIemCpu);
2393 }
2394 u64Base = 0;
2395 }
2396 else
2397 {
2398 if (uNewRip > cbLimitCs)
2399 {
2400 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
2401 /** @todo: Intel says this is #GP(0)! */
2402 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2403 }
2404 u64Base = X86DESC_BASE(&DescCs.Legacy);
2405 }
2406
2407 /*
2408 * Now set the accessed bit before
2409 * writing the return address to the stack and committing the result into
2410 * CS, CSHID and RIP.
2411 */
2412 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2413 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2414 {
2415 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2416 if (rcStrict != VINF_SUCCESS)
2417 return rcStrict;
2418 /** @todo check what VT-x and AMD-V does. */
2419 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2420 }
2421
2422 /* commit */
2423 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
2424 if (rcStrict != VINF_SUCCESS)
2425 return rcStrict;
2426 if (enmEffOpSize == IEMMODE_16BIT)
2427 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2428 else
2429 pCtx->rip = uNewRip;
2430 pCtx->cs.Sel = uNewCs;
2431 pCtx->cs.ValidSel = uNewCs;
2432 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2433 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2434 pCtx->cs.u32Limit = cbLimitCs;
2435 pCtx->cs.u64Base = u64Base;
2436 /** @todo check if the hidden bits are loaded correctly for 64-bit
2437 * mode. */
2438 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
2439 if (cbPop)
2440 iemRegAddToRsp(pIemCpu, pCtx, cbPop);
2441 pCtx->eflags.Bits.u1RF = 0;
2442 }
2443 return VINF_SUCCESS;
2444}
2445
2446
2447/**
2448 * Implements retn.
2449 *
2450 * We're doing this in C because of the \#GP that might be raised if the popped
2451 * program counter is out of bounds.
2452 *
2453 * @param enmEffOpSize The effective operand size.
2454 * @param cbPop The number of argument bytes to pop from the
2455 * stack.
2456 */
2457IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2458{
2459 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2460 NOREF(cbInstr);
2461
2462 /* Fetch the RSP from the stack. */
2463 VBOXSTRICTRC rcStrict;
2464 RTUINT64U NewRip;
2465 RTUINT64U NewRsp;
2466 NewRsp.u = pCtx->rsp;
2467 switch (enmEffOpSize)
2468 {
2469 case IEMMODE_16BIT:
2470 NewRip.u = 0;
2471 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
2472 break;
2473 case IEMMODE_32BIT:
2474 NewRip.u = 0;
2475 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
2476 break;
2477 case IEMMODE_64BIT:
2478 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
2479 break;
2480 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2481 }
2482 if (rcStrict != VINF_SUCCESS)
2483 return rcStrict;
2484
2485 /* Check the new RSP before loading it. */
2486 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
2487 * of it. The canonical test is performed here and for call. */
2488 if (enmEffOpSize != IEMMODE_64BIT)
2489 {
2490 if (NewRip.DWords.dw0 > pCtx->cs.u32Limit)
2491 {
2492 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->cs.u32Limit));
2493 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2494 }
2495 }
2496 else
2497 {
2498 if (!IEM_IS_CANONICAL(NewRip.u))
2499 {
2500 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
2501 return iemRaiseNotCanonical(pIemCpu);
2502 }
2503 }
2504
2505 /* Commit it. */
2506 pCtx->rip = NewRip.u;
2507 pCtx->rsp = NewRsp.u;
2508 if (cbPop)
2509 iemRegAddToRsp(pIemCpu, pCtx, cbPop);
2510 pCtx->eflags.Bits.u1RF = 0;
2511
2512 return VINF_SUCCESS;
2513}
2514
2515
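/*
 * Note: for the common u8NestingLevel=0 case, ENTER amounts to the following
 * sketch (register width per the operand size and stack attributes):
 *      push    xBP
 *      mov     xBP, xSP
 *      sub     xSP, cbFrame
 * Non-zero nesting levels additionally copy frame pointers from the previous
 * frame before the allocation, which is what the loop below deals with.
 */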
2516/**
2517 * Implements enter.
2518 *
2519 * We're doing this in C because the instruction is insane; even for the
2520 * u8NestingLevel=0 case, dealing with the stack is tedious.
2521 *
2522 * @param enmEffOpSize The effective operand size.
 * @param cbFrame The size of the stack frame to allocate (bytes).
 * @param cParameters The nesting level operand, masked to 0..31.
2523 */
2524IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
2525{
2526 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2527
2528 /* Push RBP, saving the old value in TmpRbp. */
2529 RTUINT64U NewRsp; NewRsp.u = pCtx->rsp;
2530 RTUINT64U TmpRbp; TmpRbp.u = pCtx->rbp;
2531 RTUINT64U NewRbp;
2532 VBOXSTRICTRC rcStrict;
2533 if (enmEffOpSize == IEMMODE_64BIT)
2534 {
2535 rcStrict = iemMemStackPushU64Ex(pIemCpu, TmpRbp.u, &NewRsp);
2536 NewRbp = NewRsp;
2537 }
2538 else if (enmEffOpSize == IEMMODE_32BIT)
2539 {
2540 rcStrict = iemMemStackPushU32Ex(pIemCpu, TmpRbp.DWords.dw0, &NewRsp);
2541 NewRbp = NewRsp;
2542 }
2543 else
2544 {
2545 rcStrict = iemMemStackPushU16Ex(pIemCpu, TmpRbp.Words.w0, &NewRsp);
2546 NewRbp = TmpRbp;
2547 NewRbp.Words.w0 = NewRsp.Words.w0;
2548 }
2549 if (rcStrict != VINF_SUCCESS)
2550 return rcStrict;
2551
2552 /* Copy the parameters (aka nesting levels by Intel). */
2553 cParameters &= 0x1f;
2554 if (cParameters > 0)
2555 {
2556 switch (enmEffOpSize)
2557 {
2558 case IEMMODE_16BIT:
2559 if (pCtx->ss.Attr.n.u1DefBig)
2560 TmpRbp.DWords.dw0 -= 2;
2561 else
2562 TmpRbp.Words.w0 -= 2;
2563 do
2564 {
2565 uint16_t u16Tmp;
2566 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Tmp, &TmpRbp);
2567 if (rcStrict != VINF_SUCCESS)
2568 break;
2569 rcStrict = iemMemStackPushU16Ex(pIemCpu, u16Tmp, &NewRsp);
2570 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2571 break;
2572
2573 case IEMMODE_32BIT:
2574 if (pCtx->ss.Attr.n.u1DefBig)
2575 TmpRbp.DWords.dw0 -= 4;
2576 else
2577 TmpRbp.Words.w0 -= 4;
2578 do
2579 {
2580 uint32_t u32Tmp;
2581 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Tmp, &TmpRbp);
2582 if (rcStrict != VINF_SUCCESS)
2583 break;
2584 rcStrict = iemMemStackPushU32Ex(pIemCpu, u32Tmp, &NewRsp);
2585 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2586 break;
2587
2588 case IEMMODE_64BIT:
2589 TmpRbp.u -= 8;
2590 do
2591 {
2592 uint64_t u64Tmp;
2593 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Tmp, &TmpRbp);
2594 if (rcStrict != VINF_SUCCESS)
2595 break;
2596 rcStrict = iemMemStackPushU64Ex(pIemCpu, u64Tmp, &NewRsp);
2597 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2598 break;
2599
2600 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2601 }
2602 if (rcStrict != VINF_SUCCESS)
2603 return rcStrict;
2604
2605 /* Push the new RBP */
2606 if (enmEffOpSize == IEMMODE_64BIT)
2607 rcStrict = iemMemStackPushU64Ex(pIemCpu, NewRbp.u, &NewRsp);
2608 else if (enmEffOpSize == IEMMODE_32BIT)
2609 rcStrict = iemMemStackPushU32Ex(pIemCpu, NewRbp.DWords.dw0, &NewRsp);
2610 else
2611 rcStrict = iemMemStackPushU16Ex(pIemCpu, NewRbp.Words.w0, &NewRsp);
2612 if (rcStrict != VINF_SUCCESS)
2613 return rcStrict;
2614
2615 }
2616
2617 /* Recalc RSP. */
2618 iemRegSubFromRspEx(pIemCpu, pCtx, &NewRsp, cbFrame);
2619
2620 /** @todo Should probe write access at the new RSP according to AMD. */
2621
2622 /* Commit it. */
2623 pCtx->rbp = NewRbp.u;
2624 pCtx->rsp = NewRsp.u;
2625 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
2626
2627 return VINF_SUCCESS;
2628}
2629
2630
2631
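/*
 * Note: LEAVE is essentially the following sketch:
 *      mov     xSP, xBP    ; width per SS.B / 64-bit mode
 *      pop     xBP         ; width per the operand size
 */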
2632/**
2633 * Implements leave.
2634 *
2635 * We're doing this in C because messing with the stack registers is annoying
2636 * since they depend on SS attributes.
2637 *
2638 * @param enmEffOpSize The effective operand size.
2639 */
2640IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
2641{
2642 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2643
2644 /* Calculate the intermediate RSP from RBP and the stack attributes. */
2645 RTUINT64U NewRsp;
2646 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2647 NewRsp.u = pCtx->rbp;
2648 else if (pCtx->ss.Attr.n.u1DefBig)
2649 NewRsp.u = pCtx->ebp;
2650 else
2651 {
2652 /** @todo Check that LEAVE actually preserves the high EBP bits. */
2653 NewRsp.u = pCtx->rsp;
2654 NewRsp.Words.w0 = pCtx->bp;
2655 }
2656
2657 /* Pop RBP according to the operand size. */
2658 VBOXSTRICTRC rcStrict;
2659 RTUINT64U NewRbp;
2660 switch (enmEffOpSize)
2661 {
2662 case IEMMODE_16BIT:
2663 NewRbp.u = pCtx->rbp;
2664 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRbp.Words.w0, &NewRsp);
2665 break;
2666 case IEMMODE_32BIT:
2667 NewRbp.u = 0;
2668 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRbp.DWords.dw0, &NewRsp);
2669 break;
2670 case IEMMODE_64BIT:
2671 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRbp.u, &NewRsp);
2672 break;
2673 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2674 }
2675 if (rcStrict != VINF_SUCCESS)
2676 return rcStrict;
2677
2678
2679 /* Commit it. */
2680 pCtx->rbp = NewRbp.u;
2681 pCtx->rsp = NewRsp.u;
2682 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
2683
2684 return VINF_SUCCESS;
2685}
2686
2687
2688/**
2689 * Implements int3 and int XX.
2690 *
2691 * @param u8Int The interrupt vector number.
2692 * @param fIsBpInstr Is it the breakpoint instruction.
2693 */
2694IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
2695{
2696 Assert(pIemCpu->cXcptRecursions == 0);
2697 return iemRaiseXcptOrInt(pIemCpu,
2698 cbInstr,
2699 u8Int,
2700 (fIsBpInstr ? IEM_XCPT_FLAGS_BP_INSTR : 0) | IEM_XCPT_FLAGS_T_SOFT_INT,
2701 0,
2702 0);
2703}
2704
2705
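/*
 * Stack layout note: the real-mode/V8086 iret frame popped below:
 *      16-bit operand size:  [IP : word ][CS : word ][FLAGS : word ]
 *      32-bit operand size:  [EIP: dword][CS : dword][EFLAGS: dword]
 * with the selector in the low word of its slot.
 */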
2706/**
2707 * Implements iret for real mode and V8086 mode.
2708 *
2709 * @param enmEffOpSize The effective operand size.
2710 */
2711IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
2712{
2713 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2714 X86EFLAGS Efl;
2715 Efl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
2716 NOREF(cbInstr);
2717
2718 /*
2719 * In V8086 mode with IOPL below 3, iret raises #GP(0) unless CR4.VME is set.
2720 */
2721 if ( Efl.Bits.u1VM
2722 && Efl.Bits.u2IOPL != 3
2723 && !(pCtx->cr4 & X86_CR4_VME))
2724 return iemRaiseGeneralProtectionFault0(pIemCpu);
2725
2726 /*
2727 * Do the stack bits, but don't commit RSP before everything checks
2728 * out right.
2729 */
2730 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2731 VBOXSTRICTRC rcStrict;
2732 RTCPTRUNION uFrame;
2733 uint16_t uNewCs;
2734 uint32_t uNewEip;
2735 uint32_t uNewFlags;
2736 uint64_t uNewRsp;
2737 if (enmEffOpSize == IEMMODE_32BIT)
2738 {
2739 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
2740 if (rcStrict != VINF_SUCCESS)
2741 return rcStrict;
2742 uNewEip = uFrame.pu32[0];
2743 if (uNewEip > UINT16_MAX)
2744 return iemRaiseGeneralProtectionFault0(pIemCpu);
2745
2746 uNewCs = (uint16_t)uFrame.pu32[1];
2747 uNewFlags = uFrame.pu32[2];
2748 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2749 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
2750 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
2751 | X86_EFL_ID;
2752 uNewFlags |= Efl.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
2753 }
2754 else
2755 {
2756 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
2757 if (rcStrict != VINF_SUCCESS)
2758 return rcStrict;
2759 uNewEip = uFrame.pu16[0];
2760 uNewCs = uFrame.pu16[1];
2761 uNewFlags = uFrame.pu16[2];
2762 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2763 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
2764 uNewFlags |= Efl.u & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF);
2765 /** @todo The intel pseudo code does not indicate what happens to
2766 * reserved flags. We just ignore them. */
2767 }
2768 /** @todo Check how this is supposed to work if sp=0xfffe. */
2769 Log7(("iemCImpl_iret_real_v8086: uNewCs=%#06x uNewRip=%#010x uNewFlags=%#x uNewRsp=%#18llx\n",
2770 uNewCs, uNewEip, uNewFlags, uNewRsp));
2771
2772 /*
2773 * Check the limit of the new EIP.
2774 */
2775 /** @todo Only the AMD pseudo code checks the limit here; what's
2776 * right? */
2777 if (uNewEip > pCtx->cs.u32Limit)
2778 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2779
2780 /*
2781 * V8086 checks and flag adjustments
2782 */
2783 if (Efl.Bits.u1VM)
2784 {
2785 if (Efl.Bits.u2IOPL == 3)
2786 {
2787 /* Preserve IOPL and clear RF. */
2788 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
2789 uNewFlags |= Efl.u & (X86_EFL_IOPL);
2790 }
2791 else if ( enmEffOpSize == IEMMODE_16BIT
2792 && ( !(uNewFlags & X86_EFL_IF)
2793 || !Efl.Bits.u1VIP )
2794 && !(uNewFlags & X86_EFL_TF) )
2795 {
2796 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
2797 uNewFlags &= ~X86_EFL_VIF;
2798 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
2799 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
2800 uNewFlags |= Efl.u & (X86_EFL_IF | X86_EFL_IOPL);
2801 }
2802 else
2803 return iemRaiseGeneralProtectionFault0(pIemCpu);
2804 Log7(("iemCImpl_iret_real_v8086: u1VM=1: adjusted uNewFlags=%#x\n", uNewFlags));
2805 }
2806
2807 /*
2808 * Commit the operation.
2809 */
2810 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
2811 if (rcStrict != VINF_SUCCESS)
2812 return rcStrict;
2813#ifdef DBGFTRACE_ENABLED
2814 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/rm %04x:%04x -> %04x:%04x %x %04llx",
2815 pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewRsp);
2816#endif
2817
2818 pCtx->rip = uNewEip;
2819 pCtx->cs.Sel = uNewCs;
2820 pCtx->cs.ValidSel = uNewCs;
2821 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2822 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
2823 /** @todo do we load attribs and limit as well? */
2824 Assert(uNewFlags & X86_EFL_1);
2825 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewFlags);
2826
2827 return VINF_SUCCESS;
2828}
2829
2830
2831/**
2832 * Loads a segment register when entering V8086 mode.
2833 *
2834 * @param pSReg The segment register.
2835 * @param uSeg The segment to load.
2836 */
2837static void iemCImplCommonV8086LoadSeg(PCPUMSELREG pSReg, uint16_t uSeg)
2838{
2839 pSReg->Sel = uSeg;
2840 pSReg->ValidSel = uSeg;
2841 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2842 pSReg->u64Base = (uint32_t)uSeg << 4;
2843 pSReg->u32Limit = 0xffff;
2844 pSReg->Attr.u = X86_SEL_TYPE_RW_ACC | RT_BIT(4) /*!sys*/ | RT_BIT(7) /*P*/ | (3 /*DPL*/ << 5); /* VT-x wants 0xf3 */
2845 /** @todo Testcase: Check if VT-x really needs this and what it does itself when
2846 * IRET'ing to V8086. */
2847}
2848
2849
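/*
 * Stack layout note: when a protected-mode iret drops back to V8086 mode, the
 * 32-bit frame continues after EFLAGS with six more dwords:
 *      ESP, SS, ES, DS, FS, GS
 * which are the 24 bytes popped by the function below.
 */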
2850/**
2851 * Implements iret for protected mode returning to V8086 mode.
2852 *
2853 * @param pCtx Pointer to the CPU context.
2854 * @param uNewEip The new EIP.
2855 * @param uNewCs The new CS.
2856 * @param uNewFlags The new EFLAGS.
2857 * @param uNewRsp The RSP after the initial IRET frame.
2858 *
2859 * @note This can only be a 32-bit iret due to the X86_EFL_VM position.
2860 */
2861IEM_CIMPL_DEF_5(iemCImpl_iret_prot_v8086, PCPUMCTX, pCtx, uint32_t, uNewEip, uint16_t, uNewCs,
2862 uint32_t, uNewFlags, uint64_t, uNewRsp)
2863{
2864 /*
2865 * Pop the V8086 specific frame bits off the stack.
2866 */
2867 VBOXSTRICTRC rcStrict;
2868 RTCPTRUNION uFrame;
2869 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 24, &uFrame.pv, &uNewRsp);
2870 if (rcStrict != VINF_SUCCESS)
2871 return rcStrict;
2872 uint32_t uNewEsp = uFrame.pu32[0];
2873 uint16_t uNewSs = uFrame.pu32[1];
2874 uint16_t uNewEs = uFrame.pu32[2];
2875 uint16_t uNewDs = uFrame.pu32[3];
2876 uint16_t uNewFs = uFrame.pu32[4];
2877 uint16_t uNewGs = uFrame.pu32[5];
2878 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
2879 if (rcStrict != VINF_SUCCESS)
2880 return rcStrict;
2881
2882 /*
2883 * Commit the operation.
2884 */
2885 uNewFlags &= X86_EFL_LIVE_MASK;
2886 uNewFlags |= X86_EFL_RA1_MASK;
2887#ifdef DBGFTRACE_ENABLED
2888 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/p/v %04x:%08x -> %04x:%04x %x %04x:%04x",
2889 pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp);
2890#endif
2891
2892 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewFlags);
2893 iemCImplCommonV8086LoadSeg(&pCtx->cs, uNewCs);
2894 iemCImplCommonV8086LoadSeg(&pCtx->ss, uNewSs);
2895 iemCImplCommonV8086LoadSeg(&pCtx->es, uNewEs);
2896 iemCImplCommonV8086LoadSeg(&pCtx->ds, uNewDs);
2897 iemCImplCommonV8086LoadSeg(&pCtx->fs, uNewFs);
2898 iemCImplCommonV8086LoadSeg(&pCtx->gs, uNewGs);
2899 pCtx->rip = uNewEip;
2900 pCtx->rsp = uNewEsp;
2901 pIemCpu->uCpl = 3;
2902
2903 return VINF_SUCCESS;
2904}
2905
2906
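/*
 * Note: the previous task link used below is the 16-bit selector stored at
 * offset 0 of the current TSS, hence the fetch straight from pCtx->tr.u64Base.
 */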
2907/**
2908 * Implements iret for protected mode returning via a nested task.
2909 *
2910 * @param enmEffOpSize The effective operand size.
2911 */
2912IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
2913{
2914 Log7(("iemCImpl_iret_prot_NestedTask:\n"));
2915#ifndef IEM_IMPLEMENTS_TASKSWITCH
2916 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
2917#else
2918 /*
2919 * Read the segment selector in the link-field of the current TSS.
2920 */
2921 RTSEL uSelRet;
2922 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2923 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pIemCpu, &uSelRet, UINT8_MAX, pCtx->tr.u64Base);
2924 if (rcStrict != VINF_SUCCESS)
2925 return rcStrict;
2926
2927 /*
2928 * Fetch the returning task's TSS descriptor from the GDT.
2929 */
2930 if (uSelRet & X86_SEL_LDT)
2931 {
2932 Log(("iret_prot_NestedTask TSS not in LDT. uSelRet=%04x -> #TS\n", uSelRet));
2933 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uSelRet);
2934 }
2935
2936 IEMSELDESC TssDesc;
2937 rcStrict = iemMemFetchSelDesc(pIemCpu, &TssDesc, uSelRet, X86_XCPT_GP);
2938 if (rcStrict != VINF_SUCCESS)
2939 return rcStrict;
2940
2941 if (TssDesc.Legacy.Gate.u1DescType)
2942 {
2943 Log(("iret_prot_NestedTask Invalid TSS type. uSelRet=%04x -> #TS\n", uSelRet));
2944 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
2945 }
2946
2947 if ( TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_286_TSS_BUSY
2948 && TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
2949 {
2950 Log(("iret_prot_NestedTask TSS is not busy. uSelRet=%04x DescType=%#x -> #TS\n", uSelRet, TssDesc.Legacy.Gate.u4Type));
2951 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
2952 }
2953
2954 if (!TssDesc.Legacy.Gate.u1Present)
2955 {
2956 Log(("iret_prot_NestedTask TSS is not present. uSelRet=%04x -> #NP\n", uSelRet));
2957 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
2958 }
2959
2960 uint32_t uNextEip = pCtx->eip + cbInstr;
2961 return iemTaskSwitch(pIemCpu, pIemCpu->CTX_SUFF(pCtx), IEMTASKSWITCH_IRET, uNextEip, 0 /* fFlags */, 0 /* uErr */,
2962 0 /* uCr2 */, uSelRet, &TssDesc);
2963#endif
2964}
2965
2966
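/*
 * Stack layout note: the protected-mode iret frame popped below is
 *      [IP/EIP][CS][FLAGS/EFLAGS]
 * and, when the RPL of the popped CS is above the current CPL (a return to
 * an outer privilege level), it is followed by the outer [SP/ESP][SS] pair.
 */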
2967/**
2968 * Implements iret for protected mode.
2969 *
2970 * @param enmEffOpSize The effective operand size.
2971 */
2972IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
2973{
2974 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2975 NOREF(cbInstr);
2976
2977 /*
2978 * Nested task return.
2979 */
2980 if (pCtx->eflags.Bits.u1NT)
2981 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
2982
2983 /*
2984 * Normal return.
2985 *
2986 * Do the stack bits, but don't commit RSP before everything checks
2987 * out right.
2988 */
2989 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2990 VBOXSTRICTRC rcStrict;
2991 RTCPTRUNION uFrame;
2992 uint16_t uNewCs;
2993 uint32_t uNewEip;
2994 uint32_t uNewFlags;
2995 uint64_t uNewRsp;
2996 if (enmEffOpSize == IEMMODE_32BIT)
2997 {
2998 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
2999 if (rcStrict != VINF_SUCCESS)
3000 return rcStrict;
3001 uNewEip = uFrame.pu32[0];
3002 uNewCs = (uint16_t)uFrame.pu32[1];
3003 uNewFlags = uFrame.pu32[2];
3004 }
3005 else
3006 {
3007 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
3008 if (rcStrict != VINF_SUCCESS)
3009 return rcStrict;
3010 uNewEip = uFrame.pu16[0];
3011 uNewCs = uFrame.pu16[1];
3012 uNewFlags = uFrame.pu16[2];
3013 }
3014 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
3015 if (rcStrict != VINF_SUCCESS)
3016 return rcStrict;
3017 Log7(("iemCImpl_iret_prot: uNewCs=%#06x uNewEip=%#010x uNewFlags=%#x uNewRsp=%#18llx\n", uNewCs, uNewEip, uNewFlags, uNewRsp));
3018
3019 /*
3020 * We're hopefully not returning to V8086 mode...
3021 */
3022 if ( (uNewFlags & X86_EFL_VM)
3023 && pIemCpu->uCpl == 0)
3024 {
3025 Assert(enmEffOpSize == IEMMODE_32BIT);
3026 return IEM_CIMPL_CALL_5(iemCImpl_iret_prot_v8086, pCtx, uNewEip, uNewCs, uNewFlags, uNewRsp);
3027 }
3028
3029 /*
3030 * Protected mode.
3031 */
3032 /* Read the CS descriptor. */
3033 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3034 {
3035 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
3036 return iemRaiseGeneralProtectionFault0(pIemCpu);
3037 }
3038
3039 IEMSELDESC DescCS;
3040 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs, X86_XCPT_GP);
3041 if (rcStrict != VINF_SUCCESS)
3042 {
3043 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
3044 return rcStrict;
3045 }
3046
3047 /* Must be a code descriptor. */
3048 if (!DescCS.Legacy.Gen.u1DescType)
3049 {
3050 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3051 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3052 }
3053 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3054 {
3055 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3056 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3057 }
3058
3059#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3060 /* Raw ring-0 and ring-1 compression adjustments for PATM performance tricks and other CS leaks. */
3061 PVM pVM = IEMCPU_TO_VM(pIemCpu);
3062 if (EMIsRawRing0Enabled(pVM) && !HMIsEnabled(pVM))
3063 {
3064 if ((uNewCs & X86_SEL_RPL) == 1)
3065 {
3066 if ( pIemCpu->uCpl == 0
3067 && ( !EMIsRawRing1Enabled(pVM)
3068 || pCtx->cs.Sel == (uNewCs & X86_SEL_MASK_OFF_RPL)) )
3069 {
3070 Log(("iret: Ring-0 compression fix: uNewCS=%#x -> %#x\n", uNewCs, uNewCs & X86_SEL_MASK_OFF_RPL));
3071 uNewCs &= X86_SEL_MASK_OFF_RPL;
3072 }
3073# ifdef LOG_ENABLED
3074 else if (pIemCpu->uCpl <= 1 && EMIsRawRing1Enabled(pVM))
3075 Log(("iret: uNewCs=%#x genuine return to ring-1.\n", uNewCs));
3076# endif
3077 }
3078 else if ( (uNewCs & X86_SEL_RPL) == 2
3079 && EMIsRawRing1Enabled(pVM)
3080 && pIemCpu->uCpl <= 1)
3081 {
3082 Log(("iret: Ring-1 compression fix: uNewCS=%#x -> %#x\n", uNewCs, (uNewCs & X86_SEL_MASK_OFF_RPL) | 1));
3083 uNewCs = (uNewCs & X86_SEL_MASK_OFF_RPL) | 2;
3084 }
3085 }
3086#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
3087
3088
3089 /* Privilege checks. */
3090 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
3091 {
3092 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pIemCpu->uCpl));
3093 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3094 }
3095 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3096 && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3097 {
3098 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3099 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3100 }
3101
3102 /* Present? */
3103 if (!DescCS.Legacy.Gen.u1Present)
3104 {
3105 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
3106 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
3107 }
3108
3109 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3110
3111 /*
3112 * Return to outer level?
3113 */
3114 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
3115 {
3116 uint16_t uNewSS;
3117 uint32_t uNewESP;
3118 if (enmEffOpSize == IEMMODE_32BIT)
3119 {
3120 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
3121 if (rcStrict != VINF_SUCCESS)
3122 return rcStrict;
3123/** @todo We might be popping a 32-bit ESP from the IRET frame, but whether
3124 * 16-bit or 32-bit are being loaded into SP depends on the D/B
3125 * bit of the popped SS selector it turns out. */
3126 uNewESP = uFrame.pu32[0];
3127 uNewSS = (uint16_t)uFrame.pu32[1];
3128 }
3129 else
3130 {
3131 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 4, &uFrame.pv, &uNewRsp);
3132 if (rcStrict != VINF_SUCCESS)
3133 return rcStrict;
3134 uNewESP = uFrame.pu16[0];
3135 uNewSS = uFrame.pu16[1];
3136 }
3137 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
3138 if (rcStrict != VINF_SUCCESS)
3139 return rcStrict;
3140 Log7(("iemCImpl_iret_prot: uNewSS=%#06x uNewESP=%#010x\n", uNewSS, uNewESP));
3141
3142 /* Read the SS descriptor. */
3143 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3144 {
3145 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
3146 return iemRaiseGeneralProtectionFault0(pIemCpu);
3147 }
3148
3149 IEMSELDESC DescSS;
3150 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_GP); /** @todo Correct exception? */
3151 if (rcStrict != VINF_SUCCESS)
3152 {
3153 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
3154 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
3155 return rcStrict;
3156 }
3157
3158 /* Privilege checks. */
3159 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3160 {
3161 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
3162 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
3163 }
3164 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3165 {
3166 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
3167 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
3168 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
3169 }
3170
3171 /* Must be a writeable data segment descriptor. */
3172 if (!DescSS.Legacy.Gen.u1DescType)
3173 {
3174 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
3175 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3176 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
3177 }
3178 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3179 {
3180 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
3181 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3182 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
3183 }
3184
3185 /* Present? */
3186 if (!DescSS.Legacy.Gen.u1Present)
3187 {
3188 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
3189 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
3190 }
3191
3192 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3193
3194 /* Check EIP. */
3195 if (uNewEip > cbLimitCS)
3196 {
3197 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
3198 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
3199 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3200 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
3201 }
3202
3203 /*
3204 * Commit the changes, marking CS and SS accessed first since
3205 * that may fail.
3206 */
3207 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3208 {
3209 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
3210 if (rcStrict != VINF_SUCCESS)
3211 return rcStrict;
3212 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3213 }
3214 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3215 {
3216 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
3217 if (rcStrict != VINF_SUCCESS)
3218 return rcStrict;
3219 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3220 }
3221
3222 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3223 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3224 if (enmEffOpSize != IEMMODE_16BIT)
3225 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3226 if (pIemCpu->uCpl == 0)
3227 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3228 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
3229 fEFlagsMask |= X86_EFL_IF;
3230 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx);
3231 fEFlagsNew &= ~fEFlagsMask;
3232 fEFlagsNew |= uNewFlags & fEFlagsMask;
3233#ifdef DBGFTRACE_ENABLED
3234 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%up%u %04x:%08x -> %04x:%04x %x %04x:%04x",
3235 pIemCpu->uCpl, uNewCs & X86_SEL_RPL, pCtx->cs.Sel, pCtx->eip,
3236 uNewCs, uNewEip, uNewFlags, uNewSS, uNewESP);
3237#endif
3238
3239 IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew);
3240 pCtx->rip = uNewEip;
3241 pCtx->cs.Sel = uNewCs;
3242 pCtx->cs.ValidSel = uNewCs;
3243 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3244 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3245 pCtx->cs.u32Limit = cbLimitCS;
3246 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3247 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
3248 if (!pCtx->ss.Attr.n.u1DefBig)
3249 pCtx->sp = (uint16_t)uNewESP;
3250 else
3251 pCtx->rsp = uNewESP;
3252 pCtx->ss.Sel = uNewSS;
3253 pCtx->ss.ValidSel = uNewSS;
3254 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3255 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3256 pCtx->ss.u32Limit = cbLimitSs;
3257 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3258
3259 pIemCpu->uCpl = uNewCs & X86_SEL_RPL;
3260 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
3261 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
3262 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
3263 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
3264
3265 /* Done! */
3266
3267 }
3268 /*
3269 * Return to the same level.
3270 */
3271 else
3272 {
3273 /* Check EIP. */
3274 if (uNewEip > cbLimitCS)
3275 {
3276 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
3277 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3278 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
3279 }
3280
3281 /*
3282 * Commit the changes, marking CS first since it may fail.
3283 */
3284 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3285 {
3286 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
3287 if (rcStrict != VINF_SUCCESS)
3288 return rcStrict;
3289 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3290 }
3291
3292 X86EFLAGS NewEfl;
3293 NewEfl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
3294 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3295 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3296 if (enmEffOpSize != IEMMODE_16BIT)
3297 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3298 if (pIemCpu->uCpl == 0)
3299 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3300 else if (pIemCpu->uCpl <= NewEfl.Bits.u2IOPL)
3301 fEFlagsMask |= X86_EFL_IF;
3302 NewEfl.u &= ~fEFlagsMask;
3303 NewEfl.u |= fEFlagsMask & uNewFlags;
3304#ifdef DBGFTRACE_ENABLED
3305 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%up %04x:%08x -> %04x:%04x %x %04x:%04llx",
3306 pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip,
3307 uNewCs, uNewEip, uNewFlags, pCtx->ss.Sel, uNewRsp);
3308#endif
3309
3310 IEMMISC_SET_EFL(pIemCpu, pCtx, NewEfl.u);
3311 pCtx->rip = uNewEip;
3312 pCtx->cs.Sel = uNewCs;
3313 pCtx->cs.ValidSel = uNewCs;
3314 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3315 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3316 pCtx->cs.u32Limit = cbLimitCS;
3317 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3318 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
3319 pCtx->rsp = uNewRsp;
3320 /* Done! */
3321 }
3322 return VINF_SUCCESS;
3323}
3324
3325
3326/**
3327 * Implements iret for long mode.
3328 *
3329 * @param enmEffOpSize The effective operand size.
3330 */
3331IEM_CIMPL_DEF_1(iemCImpl_iret_long, IEMMODE, enmEffOpSize)
3332{
3333 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3334 NOREF(cbInstr);
3335
3336 /*
3337 * Nested task return is not supported in long mode.
3338 */
3339 if (pCtx->eflags.Bits.u1NT)
3340 {
3341 Log(("iretq with NT=1 (eflags=%#x) -> #GP(0)\n", pCtx->eflags.u));
3342 return iemRaiseGeneralProtectionFault0(pIemCpu);
3343 }
3344
3345 /*
3346 * Normal return.
3347 *
3348 * Do the stack bits, but don't commit RSP before everything checks
3349 * out right.
3350 */
3351 VBOXSTRICTRC rcStrict;
3352 RTCPTRUNION uFrame;
3353 uint64_t uNewRip;
3354 uint16_t uNewCs;
3355 uint16_t uNewSs;
3356 uint32_t uNewFlags;
3357 uint64_t uNewRsp;
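    /* The frame popped below always holds five slots of the effective operand
       size, in this pop order: RIP, CS, RFLAGS, RSP and SS. */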
3358 if (enmEffOpSize == IEMMODE_64BIT)
3359 {
3360 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*8, &uFrame.pv, &uNewRsp);
3361 if (rcStrict != VINF_SUCCESS)
3362 return rcStrict;
3363 uNewRip = uFrame.pu64[0];
3364 uNewCs = (uint16_t)uFrame.pu64[1];
3365 uNewFlags = (uint32_t)uFrame.pu64[2];
3366 uNewRsp = uFrame.pu64[3];
3367 uNewSs = (uint16_t)uFrame.pu64[4];
3368 }
3369 else if (enmEffOpSize == IEMMODE_32BIT)
3370 {
3371 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*4, &uFrame.pv, &uNewRsp);
3372 if (rcStrict != VINF_SUCCESS)
3373 return rcStrict;
3374 uNewRip = uFrame.pu32[0];
3375 uNewCs = (uint16_t)uFrame.pu32[1];
3376 uNewFlags = uFrame.pu32[2];
3377 uNewRsp = uFrame.pu32[3];
3378 uNewSs = (uint16_t)uFrame.pu32[4];
3379 }
3380 else
3381 {
3382 Assert(enmEffOpSize == IEMMODE_16BIT);
3383 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*2, &uFrame.pv, &uNewRsp);
3384 if (rcStrict != VINF_SUCCESS)
3385 return rcStrict;
3386 uNewRip = uFrame.pu16[0];
3387 uNewCs = uFrame.pu16[1];
3388 uNewFlags = uFrame.pu16[2];
3389 uNewRsp = uFrame.pu16[3];
3390 uNewSs = uFrame.pu16[4];
3391 }
3392 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
3393 if (rcStrict != VINF_SUCCESS)
3394 return rcStrict;
3395 Log7(("iretq stack: cs:rip=%04x:%016RX64 rflags=%016RX64 ss:rsp=%04x:%016RX64\n", uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp));
3396
3397 /*
3398 * Check stuff.
3399 */
3400 /* Read the CS descriptor. */
3401 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3402 {
3403 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid CS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3404 return iemRaiseGeneralProtectionFault0(pIemCpu);
3405 }
3406
3407 IEMSELDESC DescCS;
3408 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs, X86_XCPT_GP);
3409 if (rcStrict != VINF_SUCCESS)
3410 {
3411 Log(("iret %04x:%016RX64/%04x:%016RX64 - rcStrict=%Rrc when fetching CS\n",
3412 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3413 return rcStrict;
3414 }
3415
3416 /* Must be a code descriptor. */
3417 if ( !DescCS.Legacy.Gen.u1DescType
3418 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3419 {
3420 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS is not a code segment u1DescType=%u u4Type=%#x -> #GP\n",
3421 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3422 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3423 }
3424
3425 /* Privilege checks. */
3426 uint8_t const uNewCpl = uNewCs & X86_SEL_RPL;
3427 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
3428 {
3429 Log(("iret %04x:%016RX64/%04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp, pIemCpu->uCpl));
3430 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3431 }
3432 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3433 && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3434 {
3435 Log(("iret %04x:%016RX64/%04x:%016RX64 - RPL < DPL (%d) -> #GP\n",
3436 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u2Dpl));
3437 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3438 }
3439
3440 /* Present? */
3441 if (!DescCS.Legacy.Gen.u1Present)
3442 {
3443 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS not present -> #NP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3444 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
3445 }
3446
3447 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3448
3449 /* Read the SS descriptor. */
3450 IEMSELDESC DescSS;
3451 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3452 {
3453 if ( !DescCS.Legacy.Gen.u1Long
3454 || DescCS.Legacy.Gen.u1DefBig /** @todo exactly how does iret (and others) behave with u1Long=1 and u1DefBig=1? \#GP(sel)? */
3455 || uNewCpl > 2) /** @todo verify SS=0 impossible for ring-3. */
3456 {
3457 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid SS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3458 return iemRaiseGeneralProtectionFault0(pIemCpu);
3459 }
3460 DescSS.Legacy.u = 0;
3461 }
3462 else
3463 {
3464 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSs, X86_XCPT_GP); /** @todo Correct exception? */
3465 if (rcStrict != VINF_SUCCESS)
3466 {
3467 Log(("iret %04x:%016RX64/%04x:%016RX64 - %Rrc when fetching SS\n",
3468 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3469 return rcStrict;
3470 }
3471 }
3472
3473 /* Privilege checks. */
3474 if ((uNewSs & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3475 {
3476 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3477 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
3478 }
3479
3480 uint32_t cbLimitSs;
3481 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3482 cbLimitSs = UINT32_MAX;
3483 else
3484 {
3485 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3486 {
3487 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.DPL (%d) != CS.RPL -> #GP\n",
3488 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u2Dpl));
3489 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
3490 }
3491
3492 /* Must be a writeable data segment descriptor. */
3493 if (!DescSS.Legacy.Gen.u1DescType)
3494 {
3495 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS is system segment (%#x) -> #GP\n",
3496 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3497 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
3498 }
3499 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3500 {
3501 Log(("iret %04x:%016RX64/%04x:%016RX64 - not writable data segment (%#x) -> #GP\n",
3502 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3503 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
3504 }
3505
3506 /* Present? */
3507 if (!DescSS.Legacy.Gen.u1Present)
3508 {
3509 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS not present -> #SS\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3510 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSs);
3511 }
3512 cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3513 }
3514
3515 /* Check EIP. */
3516 if (DescCS.Legacy.Gen.u1Long)
3517 {
3518 if (!IEM_IS_CANONICAL(uNewRip))
3519 {
3520 Log(("iret %04x:%016RX64/%04x:%016RX64 -> RIP is not canonical -> #GP(0)\n",
3521 uNewCs, uNewRip, uNewSs, uNewRsp));
3522 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
3523 }
3524 }
3525 else
3526 {
3527 if (uNewRip > cbLimitCS)
3528 {
3529 Log(("iret %04x:%016RX64/%04x:%016RX64 -> EIP is out of bounds (%#x) -> #GP(0)\n",
3530 uNewCs, uNewRip, uNewSs, uNewRsp, cbLimitCS));
3531 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3532 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
3533 }
3534 }
3535
3536 /*
3537 * Commit the changes, marking CS and SS accessed first since
3538 * that may fail.
3539 */
3540 /** @todo where exactly are these actually marked accessed by a real CPU? */
3541 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3542 {
3543 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
3544 if (rcStrict != VINF_SUCCESS)
3545 return rcStrict;
3546 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3547 }
3548 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3549 {
3550 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSs);
3551 if (rcStrict != VINF_SUCCESS)
3552 return rcStrict;
3553 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3554 }
3555
3556 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3557 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3558 if (enmEffOpSize != IEMMODE_16BIT)
3559 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3560 if (pIemCpu->uCpl == 0)
3561 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */
3562 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
3563 fEFlagsMask |= X86_EFL_IF;
3564 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx);
3565 fEFlagsNew &= ~fEFlagsMask;
3566 fEFlagsNew |= uNewFlags & fEFlagsMask;
3567#ifdef DBGFTRACE_ENABLED
3568 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%ul%u %08llx -> %04x:%04llx %x %04x:%04llx",
3569 pIemCpu->uCpl, uNewCpl, pCtx->rip, uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp);
3570#endif
3571
3572 IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew);
3573 pCtx->rip = uNewRip;
3574 pCtx->cs.Sel = uNewCs;
3575 pCtx->cs.ValidSel = uNewCs;
3576 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3577 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3578 pCtx->cs.u32Limit = cbLimitCS;
3579 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3580 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
3581 if (pCtx->cs.Attr.n.u1Long || pCtx->cs.Attr.n.u1DefBig)
3582 pCtx->rsp = uNewRsp;
3583 else
3584 pCtx->sp = (uint16_t)uNewRsp;
3585 pCtx->ss.Sel = uNewSs;
3586 pCtx->ss.ValidSel = uNewSs;
3587 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3588 {
3589 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3590 pCtx->ss.Attr.u = X86DESCATTR_UNUSABLE | (uNewCpl << X86DESCATTR_DPL_SHIFT);
3591 pCtx->ss.u32Limit = UINT32_MAX;
3592 pCtx->ss.u64Base = 0;
3593 Log2(("iretq new SS: NULL\n"));
3594 }
3595 else
3596 {
3597 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3598 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3599 pCtx->ss.u32Limit = cbLimitSs;
3600 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3601 Log2(("iretq new SS: base=%#RX64 lim=%#x attr=%#x\n", pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u));
3602 }
3603
3604 if (pIemCpu->uCpl != uNewCpl)
3605 {
3606 pIemCpu->uCpl = uNewCpl;
3607 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->ds);
3608 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->es);
3609 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->fs);
3610 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->gs);
3611 }
3612
3613 return VINF_SUCCESS;
3614}
3615
3616
3617/**
3618 * Implements iret.
3619 *
3620 * @param enmEffOpSize The effective operand size.
3621 */
3622IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
3623{
3624 /*
3625 * First, clear NMI blocking, if any, before causing any exceptions.
3626 */
3627 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3628 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
3629
3630 /*
3631 * Call a mode specific worker.
3632 */
3633 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3634 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
3635 if (IEM_IS_LONG_MODE(pIemCpu))
3636 return IEM_CIMPL_CALL_1(iemCImpl_iret_long, enmEffOpSize);
3637 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
3638}
3639
3640
3641/**
3642 * Implements SYSCALL (AMD and Intel64).
3645 */
3646IEM_CIMPL_DEF_0(iemCImpl_syscall)
3647{
3648 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3649
3650 /*
3651 * Check preconditions.
3652 *
3653 * Note that CPUs described in the documentation may load a few odd values
3654 * into CS and SS beyond what we allow here. This has yet to be checked on real
3655 * hardware.
3656 */
3657 if (!(pCtx->msrEFER & MSR_K6_EFER_SCE))
3658 {
3659 Log(("syscall: Not enabled in EFER -> #UD\n"));
3660 return iemRaiseUndefinedOpcode(pIemCpu);
3661 }
3662 if (!(pCtx->cr0 & X86_CR0_PE))
3663 {
3664 Log(("syscall: Protected mode is required -> #GP(0)\n"));
3665 return iemRaiseGeneralProtectionFault0(pIemCpu);
3666 }
3667 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !CPUMIsGuestInLongModeEx(pCtx))
3668 {
3669 Log(("syscall: Only available in long mode on intel -> #UD\n"));
3670 return iemRaiseUndefinedOpcode(pIemCpu);
3671 }
3672
3673 /** @todo verify RPL ignoring and CS=0xfff8 (i.e. SS == 0). */
3674 /** @todo what about LDT selectors? Shouldn't matter, really. */
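    /* The SYSCALL target CS comes from STAR[47:32]; SS is defined as that
       selector + 8, so the OS must lay the two descriptors out back to back. */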
3675 uint16_t uNewCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSCALL_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
3676 uint16_t uNewSs = uNewCs + 8;
3677 if (uNewCs == 0 || uNewSs == 0)
3678 {
3679 Log(("syscall: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
3680 return iemRaiseGeneralProtectionFault0(pIemCpu);
3681 }
3682
3683 /* Long mode and legacy mode differs. */
3684 if (CPUMIsGuestInLongModeEx(pCtx))
3685 {
3686 uint64_t uNewRip = pIemCpu->enmCpuMode == IEMMODE_64BIT ? pCtx->msrLSTAR : pCtx->msrCSTAR;
3687
3688 /* This test isn't in the docs, but I'm not trusting the guys writing
3689 the MSRs to have validated the values as canonical like they should. */
3690 if (!IEM_IS_CANONICAL(uNewRip))
3691 {
3692 Log(("syscall: New RIP %#llx is not canonical -> #UD\n", uNewRip));
3693 return iemRaiseUndefinedOpcode(pIemCpu);
3694 }
3695
3696 /*
3697 * Commit it.
3698 */
3699 Log(("syscall: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u, uNewCs, uNewRip));
3700 pCtx->rcx = pCtx->rip + cbInstr;
3701 pCtx->rip = uNewRip;
3702
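    /* R11 receives the old RFLAGS with RF already cleared; the bits set in
       MSR_SFMASK are then masked out of the new RFLAGS and bit 1 is forced. */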
3703 pCtx->rflags.u &= ~X86_EFL_RF;
3704 pCtx->r11 = pCtx->rflags.u;
3705 pCtx->rflags.u &= ~pCtx->msrSFMASK;
3706 pCtx->rflags.u |= X86_EFL_1;
3707
3708 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
3709 pCtx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
3710 }
3711 else
3712 {
3713 /*
3714 * Commit it.
3715 */
3716 Log(("syscall: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n",
3717 pCtx->cs.Sel, pCtx->eip, pCtx->eflags.u, uNewCs, (uint32_t)(pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK)));
3718 pCtx->rcx = pCtx->eip + cbInstr;
3719 pCtx->rip = pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK;
3720 pCtx->rflags.u &= ~(X86_EFL_VM | X86_EFL_IF | X86_EFL_RF);
3721
3722 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
3723 pCtx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
3724 }
3725 pCtx->cs.Sel = uNewCs;
3726 pCtx->cs.ValidSel = uNewCs;
3727 pCtx->cs.u64Base = 0;
3728 pCtx->cs.u32Limit = UINT32_MAX;
3729 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3730
3731 pCtx->ss.Sel = uNewSs;
3732 pCtx->ss.ValidSel = uNewSs;
3733 pCtx->ss.u64Base = 0;
3734 pCtx->ss.u32Limit = UINT32_MAX;
3735 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3736
3737 return VINF_SUCCESS;
3738}
3739
3740
3741/**
3742 * Implements SYSRET (AMD and Intel64).
3743 */
3744IEM_CIMPL_DEF_0(iemCImpl_sysret)
3746{
3747 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3748
3749 /*
3750 * Check preconditions.
3751 *
3752 * Note that CPUs described in the documentation may load a few odd values
3753 * into CS and SS beyond what we allow here. This has yet to be checked on real
3754 * hardware.
3755 */
3756 if (!(pCtx->msrEFER & MSR_K6_EFER_SCE))
3757 {
3758 Log(("sysret: Not enabled in EFER -> #UD\n"));
3759 return iemRaiseUndefinedOpcode(pIemCpu);
3760 }
3761 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !CPUMIsGuestInLongModeEx(pCtx))
3762 {
3763 Log(("sysret: Only available in long mode on intel -> #UD\n"));
3764 return iemRaiseUndefinedOpcode(pIemCpu);
3765 }
3766 if (!(pCtx->cr0 & X86_CR0_PE))
3767 {
3768 Log(("sysret: Protected mode is required -> #GP(0)\n"));
3769 return iemRaiseGeneralProtectionFault0(pIemCpu);
3770 }
3771 if (pIemCpu->uCpl != 0)
3772 {
3773 Log(("sysret: CPL must be 0 not %u -> #GP(0)\n", pIemCpu->uCpl));
3774 return iemRaiseGeneralProtectionFault0(pIemCpu);
3775 }
3776
3777 /** @todo Does SYSRET verify CS != 0 and SS != 0? Neither is valid in ring-3. */
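    /* SYSRET takes CS from STAR[63:48] (+16 when returning to 64-bit code) and
       SS from STAR[63:48] + 8; RPL 3 is OR'ed into both when committing below. */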
3778 uint16_t uNewCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSRET_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
3779 uint16_t uNewSs = uNewCs + 8;
3780 if (pIemCpu->enmEffOpSize == IEMMODE_64BIT)
3781 uNewCs += 16;
3782 if (uNewCs == 0 || uNewSs == 0)
3783 {
3784 Log(("sysret: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
3785 return iemRaiseGeneralProtectionFault0(pIemCpu);
3786 }
3787
3788 /*
3789 * Commit it.
3790 */
3791 if (CPUMIsGuestInLongModeEx(pCtx))
3792 {
3793 if (pIemCpu->enmEffOpSize == IEMMODE_64BIT)
3794 {
3795 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64 [r11=%#llx]\n",
3796 pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u, uNewCs, pCtx->rcx, pCtx->r11));
3797 /* Note! We disregard the intel manual regarding the RCX canonical
3798 check, ask intel+xen why AMD doesn't do it. */
3799 pCtx->rip = pCtx->rcx;
3800 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
3801 | (3 << X86DESCATTR_DPL_SHIFT);
3802 }
3803 else
3804 {
3805 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%08RX32 [r11=%#llx]\n",
3806 pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u, uNewCs, pCtx->ecx, pCtx->r11));
3807 pCtx->rip = pCtx->ecx;
3808 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
3809 | (3 << X86DESCATTR_DPL_SHIFT);
3810 }
3811 /** @todo testcase: See what kind of flags we can make SYSRET restore and
3812 * what it really ignores. RF and VM are hinted at being zero, by AMD. */
3813 pCtx->rflags.u = pCtx->r11 & (X86_EFL_POPF_BITS | X86_EFL_VIF | X86_EFL_VIP);
3814 pCtx->rflags.u |= X86_EFL_1;
3815 }
3816 else
3817 {
3818 Log(("sysret: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.u, uNewCs, pCtx->ecx));
3819 pCtx->rip = pCtx->rcx;
3820 pCtx->rflags.u |= X86_EFL_IF;
3821 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
3822 | (3 << X86DESCATTR_DPL_SHIFT);
3823 }
3824 pCtx->cs.Sel = uNewCs | 3;
3825 pCtx->cs.ValidSel = uNewCs | 3;
3826 pCtx->cs.u64Base = 0;
3827 pCtx->cs.u32Limit = UINT32_MAX;
3828 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3829
3830 pCtx->ss.Sel = uNewSs | 3;
3831 pCtx->ss.ValidSel = uNewSs | 3;
3832 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3833 /* The SS hidden bits remain unchanged, says AMD. To that I say "Yeah, right!". */
3834 pCtx->ss.Attr.u |= (3 << X86DESCATTR_DPL_SHIFT);
3835 /** @todo Testcase: verify that SS.u1Long and SS.u1DefBig are left unchanged
3836 * on sysret. */
3837
3838 return VINF_SUCCESS;
3839}
3840
3841
3842/**
3843 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
3844 *
3845 * @param iSegReg The segment register number (valid).
3846 * @param uSel The new selector value.
3847 */
3848IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
3849{
3850 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
3851 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
3852 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
3853
3854 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
3855
3856 /*
3857 * Real mode and V8086 mode are easy.
3858 */
3859 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
3860 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3861 {
3862 *pSel = uSel;
3863 pHid->u64Base = (uint32_t)uSel << 4;
3864 pHid->ValidSel = uSel;
3865 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
3866#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
3867 /** @todo Does the CPU actually load limits and attributes in the
3868 * real/V8086 mode segment load case? It doesn't for CS in far
3869 * jumps... Affects unreal mode. */
3870 pHid->u32Limit = 0xffff;
3871 pHid->Attr.u = 0;
3872 pHid->Attr.n.u1Present = 1;
3873 pHid->Attr.n.u1DescType = 1;
3874 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
3875 ? X86_SEL_TYPE_RW
3876 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
3877#endif
3878 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
3879 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
3880 return VINF_SUCCESS;
3881 }
3882
3883 /*
3884 * Protected mode.
3885 *
3886 * Check if it's a null segment selector value first, that's OK for DS, ES,
3887 * FS and GS. If not null, then we have to load and parse the descriptor.
3888 */
3889 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3890 {
3891 Assert(iSegReg != X86_SREG_CS); /** @todo testcase for \#UD on MOV CS, ax! */
3892 if (iSegReg == X86_SREG_SS)
3893 {
3894 /* In 64-bit kernel mode, the stack can be 0 because of the way
3895 interrupts are dispatched. AMD seems to have a slightly more
3896 relaxed relationship to SS.RPL than intel does. */
3897 /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? There is a testcase (bs-cpu-xcpt-1), but double check this! */
3898 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
3899 || pIemCpu->uCpl > 2
3900 || ( uSel != pIemCpu->uCpl
3901 && !IEM_IS_GUEST_CPU_AMD(pIemCpu)) )
3902 {
3903 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
3904 return iemRaiseGeneralProtectionFault0(pIemCpu);
3905 }
3906 }
3907
3908 *pSel = uSel; /* Not RPL, remember :-) */
3909 iemHlpLoadNullDataSelectorProt(pIemCpu, pHid, uSel);
3910 if (iSegReg == X86_SREG_SS)
3911 pHid->Attr.u |= pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT;
3912
3913 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid));
3914 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
3915
3916 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
3917 return VINF_SUCCESS;
3918 }
3919
3920 /* Fetch the descriptor. */
3921 IEMSELDESC Desc;
3922 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP); /** @todo Correct exception? */
3923 if (rcStrict != VINF_SUCCESS)
3924 return rcStrict;
3925
3926 /* Check GPs first. */
3927 if (!Desc.Legacy.Gen.u1DescType)
3928 {
3929 Log(("load sreg %d (=%#x) - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
3930 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3931 }
3932 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
3933 {
3934 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3935 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3936 {
3937 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
3938 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3939 }
3940 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
3941 {
3942 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
3943 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3944 }
3945 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
3946 {
3947 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3948 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3949 }
3950 }
3951 else
3952 {
3953 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3954 {
3955 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
3956 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3957 }
3958 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3959 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3960 {
3961#if 0 /* this is what intel says. */
3962 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3963 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
3964 {
3965 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
3966 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
3967 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3968 }
3969#else /* this is what makes more sense. */
3970 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
3971 {
3972 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
3973 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
3974 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3975 }
3976 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
3977 {
3978 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
3979 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
3980 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3981 }
3982#endif
3983 }
3984 }
3985
3986 /* Is it there? */
3987 if (!Desc.Legacy.Gen.u1Present)
3988 {
3989 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
3990 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
3991 }
3992
3993 /* The base and limit. */
3994 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3995 uint64_t u64Base;
3996 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
3997 && iSegReg < X86_SREG_FS)
3998 u64Base = 0;
3999 else
4000 u64Base = X86DESC_BASE(&Desc.Legacy);
4001
4002 /*
4003 * Ok, everything checked out fine. Now set the accessed bit before
4004 * committing the result into the registers.
4005 */
4006 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4007 {
4008 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
4009 if (rcStrict != VINF_SUCCESS)
4010 return rcStrict;
4011 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4012 }
4013
4014 /* commit */
4015 *pSel = uSel;
4016 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4017 pHid->u32Limit = cbLimit;
4018 pHid->u64Base = u64Base;
4019 pHid->ValidSel = uSel;
4020 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4021
4022 /** @todo check if the hidden bits are loaded correctly for 64-bit
4023 * mode. */
4024 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid));
4025
4026 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
4027 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4028 return VINF_SUCCESS;
4029}
4030
4031
4032/**
4033 * Implements 'mov SReg, r/m'.
4034 *
4035 * @param iSegReg The segment register number (valid).
4036 * @param uSel The new selector value.
4037 */
4038IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
4039{
4040 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4041 if (rcStrict == VINF_SUCCESS)
4042 {
4043 if (iSegReg == X86_SREG_SS)
4044 {
4045 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4046 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
4047 }
4048 }
4049 return rcStrict;
4050}
4051
4052
4053/**
4054 * Implements 'pop SReg'.
4055 *
4056 * @param iSegReg The segment register number (valid).
4057 * @param enmEffOpSize The efficient operand size (valid).
4058 */
4059IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
4060{
4061 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4062 VBOXSTRICTRC rcStrict;
4063
4064 /*
4065 * Read the selector off the stack and join paths with mov ss, reg.
4066 */
4067 RTUINT64U TmpRsp;
4068 TmpRsp.u = pCtx->rsp;
4069 switch (enmEffOpSize)
4070 {
4071 case IEMMODE_16BIT:
4072 {
4073 uint16_t uSel;
4074 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
4075 if (rcStrict == VINF_SUCCESS)
4076 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4077 break;
4078 }
4079
4080 case IEMMODE_32BIT:
4081 {
4082 uint32_t u32Value;
4083 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
4084 if (rcStrict == VINF_SUCCESS)
4085 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
4086 break;
4087 }
4088
4089 case IEMMODE_64BIT:
4090 {
4091 uint64_t u64Value;
4092 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
4093 if (rcStrict == VINF_SUCCESS)
4094 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
4095 break;
4096 }
4097 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4098 }
4099
4100 /*
4101 * Commit the stack on success.
4102 */
4103 if (rcStrict == VINF_SUCCESS)
4104 {
4105 pCtx->rsp = TmpRsp.u;
4106 if (iSegReg == X86_SREG_SS)
4107 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
4108 }
4109 return rcStrict;
4110}
4111
4112
4113/**
4114 * Implements lgs, lfs, les, lds & lss.
4115 */
4116IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
4117 uint16_t, uSel,
4118 uint64_t, offSeg,
4119 uint8_t, iSegReg,
4120 uint8_t, iGReg,
4121 IEMMODE, enmEffOpSize)
4122{
4123 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
4124 VBOXSTRICTRC rcStrict;
4125
4126 /*
4127 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
4128 */
4129 /** @todo verify and test that mov, pop and lXs perform the segment
4130 * register loading in the exact same way. */
4131 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4132 if (rcStrict == VINF_SUCCESS)
4133 {
4134 switch (enmEffOpSize)
4135 {
4136 case IEMMODE_16BIT:
4137 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
4138 break;
4139 case IEMMODE_32BIT:
4140 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
4141 break;
4142 case IEMMODE_64BIT:
4143 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
4144 break;
4145 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4146 }
4147 }
4148
4149 return rcStrict;
4150}
4151
4152
4153/**
4154 * Helper for VERR, VERW, LAR, and LSL that fetches the descriptor referenced by a selector.
4155 *
4156 * @retval VINF_SUCCESS on success.
4157 * @retval VINF_IEM_SELECTOR_NOT_OK if the selector isn't ok.
4158 * @retval iemMemFetchSysU64 return value.
4159 *
4160 * @param pIemCpu The IEM state of the calling EMT.
4161 * @param uSel The selector value.
4162 * @param fAllowSysDesc Whether system descriptors are OK or not.
4163 * @param pDesc Where to return the descriptor on success.
4164 */
4165static VBOXSTRICTRC iemCImpl_LoadDescHelper(PIEMCPU pIemCpu, uint16_t uSel, bool fAllowSysDesc, PIEMSELDESC pDesc)
4166{
4167 pDesc->Long.au64[0] = 0;
4168 pDesc->Long.au64[1] = 0;
4169
4170 if (!(uSel & X86_SEL_MASK_OFF_RPL)) /** @todo test this on 64-bit. */
4171 return VINF_IEM_SELECTOR_NOT_OK;
4172
4173 /* Within the table limits? */
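    /* uSel | X86_SEL_RPL_LDT is the offset of the last byte of the 8-byte
       descriptor, so the checks below ensure the whole entry fits in the table. */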
4174 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4175 RTGCPTR GCPtrBase;
4176 if (uSel & X86_SEL_LDT)
4177 {
4178 if ( !pCtx->ldtr.Attr.n.u1Present
4179 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
4180 return VINF_IEM_SELECTOR_NOT_OK;
4181 GCPtrBase = pCtx->ldtr.u64Base;
4182 }
4183 else
4184 {
4185 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
4186 return VINF_IEM_SELECTOR_NOT_OK;
4187 GCPtrBase = pCtx->gdtr.pGdt;
4188 }
4189
4190 /* Fetch the descriptor. */
4191 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
4192 if (rcStrict != VINF_SUCCESS)
4193 return rcStrict;
4194 if (!pDesc->Legacy.Gen.u1DescType)
4195 {
4196 if (!fAllowSysDesc)
4197 return VINF_IEM_SELECTOR_NOT_OK;
4198 if (CPUMIsGuestInLongModeEx(pCtx))
4199 {
4200 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
4201 if (rcStrict != VINF_SUCCESS)
4202 return rcStrict;
4203 }
4204
4205 }
4206
4207 return VINF_SUCCESS;
4208}
4209
4210
4211/**
4212 * Implements verr (fWrite = false) and verw (fWrite = true).
4213 */
4214IEM_CIMPL_DEF_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite)
4215{
4216 Assert(!IEM_IS_REAL_OR_V86_MODE(pIemCpu));
4217
4218 /** @todo figure whether the accessed bit is set or not. */
4219
4220 bool fAccessible = true;
4221 IEMSELDESC Desc;
4222 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pIemCpu, uSel, false /*fAllowSysDesc*/, &Desc);
4223 if (rcStrict == VINF_SUCCESS)
4224 {
4225 /* Check the descriptor, order doesn't matter much here. */
4226 if ( !Desc.Legacy.Gen.u1DescType
4227 || !Desc.Legacy.Gen.u1Present)
4228 fAccessible = false;
4229 else
4230 {
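    /* verw requires a writable data segment, whereas verr accepts anything
       readable, i.e. everything but an execute-only code segment. */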
4231 if ( fWrite
4232 ? (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE
4233 : (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4234 fAccessible = false;
4235
4236 /** @todo testcase for the conforming behavior. */
4237 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4238 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4239 {
4240 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4241 fAccessible = false;
4242 else if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
4243 fAccessible = false;
4244 }
4245 }
4246
4247 }
4248 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4249 fAccessible = false;
4250 else
4251 return rcStrict;
4252
4253 /* commit */
4254 pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1ZF = fAccessible;
4255
4256 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4257 return VINF_SUCCESS;
4258}
4259
4260
4261/**
4262 * Implements LAR and LSL with 64-bit operand size.
4263 *
4264 * @returns VINF_SUCCESS.
4265 * @param pu64Dst Pointer to the destination register.
4266 * @param uSel The selector to load details for.
4267 * @param pEFlags Pointer to the eflags register.
4268 * @param fIsLar true = LAR, false = LSL.
4269 */
4270IEM_CIMPL_DEF_4(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, uint32_t *, pEFlags, bool, fIsLar)
4271{
4272 Assert(!IEM_IS_REAL_OR_V86_MODE(pIemCpu));
4273
4274 /** @todo figure whether the accessed bit is set or not. */
4275
4276 bool fDescOk = true;
4277 IEMSELDESC Desc;
4278 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pIemCpu, uSel, false /*fAllowSysDesc*/, &Desc);
4279 if (rcStrict == VINF_SUCCESS)
4280 {
4281 /*
4282 * Check the descriptor type.
4283 */
4284 if (!Desc.Legacy.Gen.u1DescType)
4285 {
4286 if (CPUMIsGuestInLongModeEx(pIemCpu->CTX_SUFF(pCtx)))
4287 {
4288 if (Desc.Long.Gen.u5Zeros)
4289 fDescOk = false;
4290 else
4291 switch (Desc.Long.Gen.u4Type)
4292 {
4293 /** @todo Intel lists 0 as valid for LSL, verify whether that's correct */
4294 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
4295 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
4296 case AMD64_SEL_TYPE_SYS_LDT: /** @todo Intel lists this as invalid for LAR, AMD and 32-bit does otherwise. */
4297 break;
4298 case AMD64_SEL_TYPE_SYS_CALL_GATE:
4299 fDescOk = fIsLar;
4300 break;
4301 default:
4302 fDescOk = false;
4303 break;
4304 }
4305 }
4306 else
4307 {
4308 switch (Desc.Long.Gen.u4Type)
4309 {
4310 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4311 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4312 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4313 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4314 case X86_SEL_TYPE_SYS_LDT:
4315 break;
4316 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4317 case X86_SEL_TYPE_SYS_TASK_GATE:
4318 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4319 fDescOk = fIsLar;
4320 break;
4321 default:
4322 fDescOk = false;
4323 break;
4324 }
4325 }
4326 }
4327 if (fDescOk)
4328 {
4329 /*
4330 * Check the RPL/DPL/CPL interaction..
4331 */
4332 /** @todo testcase for the conforming behavior. */
4333 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
4334 || !Desc.Legacy.Gen.u1DescType)
4335 {
4336 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4337 fDescOk = false;
4338 else if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
4339 fDescOk = false;
4340 }
4341 }
4342
4343 if (fDescOk)
4344 {
4345 /*
4346 * All fine, start committing the result.
4347 */
4348 if (fIsLar)
4349 *pu64Dst = Desc.Legacy.au32[1] & UINT32_C(0x00ffff00);
4350 else
4351 *pu64Dst = X86DESC_LIMIT_G(&Desc.Legacy);
4352 }
4353
4354 }
4355 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4356 fDescOk = false;
4357 else
4358 return rcStrict;
4359
4360 /* commit flags value and advance rip. */
4361 pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1ZF = fDescOk;
4362 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4363
4364 return VINF_SUCCESS;
4365}
4366
4367
4368/**
4369 * Implements LAR and LSL with 16-bit operand size.
4370 *
4371 * @returns VINF_SUCCESS.
4372 * @param pu16Dst Pointer to the destination register.
4373 * @param uSel The selector to load details for.
4374 * @param pEFlags Pointer to the eflags register.
4375 * @param fIsLar true = LAR, false = LSL.
4376 */
4377IEM_CIMPL_DEF_4(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, uint32_t *, pEFlags, bool, fIsLar)
4378{
4379 uint64_t u64TmpDst = *pu16Dst;
4380 IEM_CIMPL_CALL_4(iemCImpl_LarLsl_u64, &u64TmpDst, uSel, pEFlags, fIsLar);
4381 *pu16Dst = (uint16_t)u64TmpDst;
4382 return VINF_SUCCESS;
4383}
4384
4385
4386/**
4387 * Implements lgdt.
4388 *
4389 * @param iEffSeg The segment of the new gdtr contents.
4390 * @param GCPtrEffSrc The address of the new gdtr contents.
4391 * @param enmEffOpSize The effective operand size.
4392 */
4393IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
4394{
4395 if (pIemCpu->uCpl != 0)
4396 return iemRaiseGeneralProtectionFault0(pIemCpu);
4397 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
4398
4399 /*
4400 * Fetch the limit and base address.
4401 */
4402 uint16_t cbLimit;
4403 RTGCPTR GCPtrBase;
4404 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
4405 if (rcStrict == VINF_SUCCESS)
4406 {
4407 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4408 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
4409 else
4410 {
4411 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4412 pCtx->gdtr.cbGdt = cbLimit;
4413 pCtx->gdtr.pGdt = GCPtrBase;
4414 }
4415 if (rcStrict == VINF_SUCCESS)
4416 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4417 }
4418 return rcStrict;
4419}
4420
4421
4422/**
4423 * Implements sgdt.
4424 *
4425 * @param iEffSeg The segment where to store the gdtr content.
4426 * @param GCPtrEffDst The address where to store the gdtr content.
4427 * @param enmEffOpSize The effective operand size.
4428 */
4429IEM_CIMPL_DEF_3(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, IEMMODE, enmEffOpSize)
4430{
4431 /*
4432 * Join paths with sidt.
4433 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
4434 * you really must know.
4435 */
4436 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4437 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst, enmEffOpSize);
4438 if (rcStrict == VINF_SUCCESS)
4439 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4440 return rcStrict;
4441}
4442
4443
4444/**
4445 * Implements lidt.
4446 *
4447 * @param iEffSeg The segment of the new idtr contents.
4448 * @param GCPtrEffSrc The address of the new idtr contents.
4449 * @param enmEffOpSize The effective operand size.
4450 */
4451IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
4452{
4453 if (pIemCpu->uCpl != 0)
4454 return iemRaiseGeneralProtectionFault0(pIemCpu);
4455 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
4456
4457 /*
4458 * Fetch the limit and base address.
4459 */
4460 uint16_t cbLimit;
4461 RTGCPTR GCPtrBase;
4462 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
4463 if (rcStrict == VINF_SUCCESS)
4464 {
4465 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4466 CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
4467 else
4468 {
4469 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4470 pCtx->idtr.cbIdt = cbLimit;
4471 pCtx->idtr.pIdt = GCPtrBase;
4472 }
4473 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4474 }
4475 return rcStrict;
4476}
4477
4478
4479/**
4480 * Implements sidt.
4481 *
4482 * @param iEffSeg The segment where to store the idtr content.
4483 * @param GCPtrEffDst The address where to store the idtr content.
4484 * @param enmEffOpSize The effective operand size.
4485 */
4486IEM_CIMPL_DEF_3(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, IEMMODE, enmEffOpSize)
4487{
4488 /*
4489 * Join paths with sgdt.
4490 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
4491 * you really must know.
4492 */
4493 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4494 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->idtr.cbIdt, pCtx->idtr.pIdt, iEffSeg, GCPtrEffDst, enmEffOpSize);
4495 if (rcStrict == VINF_SUCCESS)
4496 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4497 return rcStrict;
4498}
4499
4500
4501/**
4502 * Implements lldt.
4503 *
4504 * @param uNewLdt The new LDT selector value.
4505 */
4506IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
4507{
4508 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4509
4510 /*
4511 * Check preconditions.
4512 */
4513 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4514 {
4515 Log(("lldt %04x - real or v8086 mode -> #UD\n", uNewLdt));
4516 return iemRaiseUndefinedOpcode(pIemCpu);
4517 }
4518 if (pIemCpu->uCpl != 0)
4519 {
4520 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pIemCpu->uCpl));
4521 return iemRaiseGeneralProtectionFault0(pIemCpu);
4522 }
4523 if (uNewLdt & X86_SEL_LDT)
4524 {
4525 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
4526 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewLdt);
4527 }
4528
4529 /*
4530 * Now, loading a NULL selector is easy.
4531 */
4532 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4533 {
4534 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
4535 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4536 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt);
4537 else
4538 pCtx->ldtr.Sel = uNewLdt;
4539 pCtx->ldtr.ValidSel = uNewLdt;
4540 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4541 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
4542 {
4543 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE;
4544 pCtx->ldtr.u64Base = pCtx->ldtr.u32Limit = 0; /* For verification against REM. */
4545 }
4546 else if (IEM_IS_GUEST_CPU_AMD(pIemCpu))
4547 {
4548 /* AMD-V seems to leave the base and limit alone. */
4549 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE;
4550 }
4551 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
4552 {
4553 /* VT-x (Intel 3960x) seems to be doing the following. */
4554 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D;
4555 pCtx->ldtr.u64Base = 0;
4556 pCtx->ldtr.u32Limit = UINT32_MAX;
4557 }
4558
4559 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4560 return VINF_SUCCESS;
4561 }
4562
4563 /*
4564 * Read the descriptor.
4565 */
4566 IEMSELDESC Desc;
4567 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt, X86_XCPT_GP); /** @todo Correct exception? */
4568 if (rcStrict != VINF_SUCCESS)
4569 return rcStrict;
4570
4571 /* Check GPs first. */
4572 if (Desc.Legacy.Gen.u1DescType)
4573 {
4574 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
4575 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4576 }
4577 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4578 {
4579 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
4580 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4581 }
4582 uint64_t u64Base;
4583 if (!IEM_IS_LONG_MODE(pIemCpu))
4584 u64Base = X86DESC_BASE(&Desc.Legacy);
4585 else
4586 {
4587 if (Desc.Long.Gen.u5Zeros)
4588 {
4589 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
4590 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4591 }
4592
4593 u64Base = X86DESC64_BASE(&Desc.Long);
4594 if (!IEM_IS_CANONICAL(u64Base))
4595 {
4596 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
4597 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4598 }
4599 }
4600
4601 /* NP */
4602 if (!Desc.Legacy.Gen.u1Present)
4603 {
4604 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
4605 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewLdt);
4606 }
4607
4608 /*
4609 * It checks out alright, update the registers.
4610 */
4611/** @todo check if the actual value is loaded or if the RPL is dropped */
4612 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4613 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK_OFF_RPL);
4614 else
4615 pCtx->ldtr.Sel = uNewLdt & X86_SEL_MASK_OFF_RPL;
4616 pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
4617 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4618 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4619 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
4620 pCtx->ldtr.u64Base = u64Base;
4621
4622 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4623 return VINF_SUCCESS;
4624}
4625
4626
4627/**
4628 * Implements ltr.
4629 *
4630 * @param uNewTr The new TSS selector value.
4631 */
4632IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
4633{
4634 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4635
4636 /*
4637 * Check preconditions.
4638 */
4639 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4640 {
4641 Log(("ltr %04x - real or v8086 mode -> #UD\n", uNewTr));
4642 return iemRaiseUndefinedOpcode(pIemCpu);
4643 }
4644 if (pIemCpu->uCpl != 0)
4645 {
4646 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pIemCpu->uCpl));
4647 return iemRaiseGeneralProtectionFault0(pIemCpu);
4648 }
4649 if (uNewTr & X86_SEL_LDT)
4650 {
4651 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
4652 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewTr);
4653 }
4654 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
4655 {
4656 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
4657 return iemRaiseGeneralProtectionFault0(pIemCpu);
4658 }
4659
4660 /*
4661 * Read the descriptor.
4662 */
4663 IEMSELDESC Desc;
4664 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr, X86_XCPT_GP); /** @todo Correct exception? */
4665 if (rcStrict != VINF_SUCCESS)
4666 return rcStrict;
4667
4668 /* Check GPs first. */
4669 if (Desc.Legacy.Gen.u1DescType)
4670 {
4671 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
4672 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4673 }
4674 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
4675 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4676 || IEM_IS_LONG_MODE(pIemCpu)) )
4677 {
4678 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
4679 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4680 }
4681 uint64_t u64Base;
4682 if (!IEM_IS_LONG_MODE(pIemCpu))
4683 u64Base = X86DESC_BASE(&Desc.Legacy);
4684 else
4685 {
4686 if (Desc.Long.Gen.u5Zeros)
4687 {
4688 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
4689 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4690 }
4691
4692 u64Base = X86DESC64_BASE(&Desc.Long);
4693 if (!IEM_IS_CANONICAL(u64Base))
4694 {
4695 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
4696 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4697 }
4698 }
4699
4700 /* NP */
4701 if (!Desc.Legacy.Gen.u1Present)
4702 {
4703 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
4704 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewTr);
4705 }
4706
4707 /*
4708 * Set it busy.
4709 * Note! Intel says this should lock down the whole descriptor, but we'll
4710 * restrict ourselves to 32-bit for now due to lack of inline
4711 * assembly and such.
4712 */
4713 void *pvDesc;
4714 rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW);
4715 if (rcStrict != VINF_SUCCESS)
4716 return rcStrict;
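    /* The busy flag is bit 1 of the type field, i.e. bit 41 of the descriptor.
       The switch below realigns the mapping to a 32-bit boundary first,
       presumably because ASMAtomicBitSet wants a 32-bit aligned bitmap. */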
4717 switch ((uintptr_t)pvDesc & 3)
4718 {
4719 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
4720 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
4721 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
4722 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
4723 }
4724 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvDesc, IEM_ACCESS_DATA_RW);
4725 if (rcStrict != VINF_SUCCESS)
4726 return rcStrict;
4727 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4728
4729 /*
4730 * It checks out alright, update the registers.
4731 */
4732/** @todo check if the actual value is loaded or if the RPL is dropped */
4733 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4734 CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK_OFF_RPL);
4735 else
4736 pCtx->tr.Sel = uNewTr & X86_SEL_MASK_OFF_RPL;
4737 pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
4738 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4739 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4740 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
4741 pCtx->tr.u64Base = u64Base;
4742
4743 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4744 return VINF_SUCCESS;
4745}
4746
4747
4748/**
4749 * Implements mov GReg,CRx.
4750 *
4751 * @param iGReg The general register to store the CRx value in.
4752 * @param iCrReg The CRx register to read (valid).
4753 */
4754IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
4755{
4756 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4757 if (pIemCpu->uCpl != 0)
4758 return iemRaiseGeneralProtectionFault0(pIemCpu);
4759 Assert(!pCtx->eflags.Bits.u1VM);
4760
4761 /* read it */
4762 uint64_t crX;
4763 switch (iCrReg)
4764 {
4765 case 0: crX = pCtx->cr0; break;
4766 case 2: crX = pCtx->cr2; break;
4767 case 3: crX = pCtx->cr3; break;
4768 case 4: crX = pCtx->cr4; break;
4769 case 8:
4770 {
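    /* CR8 reads back the task-priority class, i.e. bits 7:4 of the 8-bit APIC TPR. */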
4771 uint8_t uTpr;
4772 int rc = PDMApicGetTPR(IEMCPU_TO_VMCPU(pIemCpu), &uTpr, NULL, NULL);
4773 if (RT_SUCCESS(rc))
4774 crX = uTpr >> 4;
4775 else
4776 crX = 0;
4777 break;
4778 }
4779 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
4780 }
4781
4782 /* store it */
4783 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4784 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
4785 else
4786 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
4787
4788 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4789 return VINF_SUCCESS;
4790}
4791
4792
4793/**
4794 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
4795 *
4796 * @param iCrReg The CRx register to write (valid).
4797 * @param uNewCrX The new value.
4798 */
4799IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
4800{
4801 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4802 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4803 VBOXSTRICTRC rcStrict;
4804 int rc;
4805
4806 /*
4807 * Try store it.
4808 * Unfortunately, CPUM only does a tiny bit of the work.
4809 */
4810 switch (iCrReg)
4811 {
4812 case 0:
4813 {
4814 /*
4815 * Perform checks.
4816 */
4817 uint64_t const uOldCrX = pCtx->cr0;
4818 uNewCrX |= X86_CR0_ET; /* hardcoded */
4819
4820 /* Check for reserved bits. */
4821 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
4822 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
4823 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
4824 if (uNewCrX & ~(uint64_t)fValid)
4825 {
4826 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
4827 return iemRaiseGeneralProtectionFault0(pIemCpu);
4828 }
4829
4830 /* Check for invalid combinations. */
4831 if ( (uNewCrX & X86_CR0_PG)
4832 && !(uNewCrX & X86_CR0_PE) )
4833 {
4834 Log(("Trying to set CR0.PG without CR0.PE\n"));
4835 return iemRaiseGeneralProtectionFault0(pIemCpu);
4836 }
4837
4838 if ( !(uNewCrX & X86_CR0_CD)
4839 && (uNewCrX & X86_CR0_NW) )
4840 {
4841 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
4842 return iemRaiseGeneralProtectionFault0(pIemCpu);
4843 }
4844
4845 /* Long mode consistency checks. */
4846 if ( (uNewCrX & X86_CR0_PG)
4847 && !(uOldCrX & X86_CR0_PG)
4848 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
4849 {
4850 if (!(pCtx->cr4 & X86_CR4_PAE))
4851 {
4852 Log(("Trying to enable long mode paging without CR4.PAE set\n"));
4853 return iemRaiseGeneralProtectionFault0(pIemCpu);
4854 }
4855 if (pCtx->cs.Attr.n.u1Long)
4856 {
4857 Log(("Trying to enable long mode paging with a long CS descriptor loaded.\n"));
4858 return iemRaiseGeneralProtectionFault0(pIemCpu);
4859 }
4860 }
4861
4862 /** @todo check reserved PDPTR bits as AMD states. */
4863
4864 /*
4865 * Change CR0.
4866 */
4867 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
4868 CPUMSetGuestCR0(pVCpu, uNewCrX);
4869 else
4870 pCtx->cr0 = uNewCrX;
4871 Assert(pCtx->cr0 == uNewCrX);
4872
4873 /*
4874 * Change EFER.LMA if entering or leaving long mode.
4875 */
4876 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
4877 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
4878 {
4879 uint64_t NewEFER = pCtx->msrEFER;
4880 if (uNewCrX & X86_CR0_PG)
4881 NewEFER |= MSR_K6_EFER_LMA;
4882 else
4883 NewEFER &= ~MSR_K6_EFER_LMA;
4884
4885 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4886 CPUMSetGuestEFER(pVCpu, NewEFER);
4887 else
4888 pCtx->msrEFER = NewEFER;
4889 Assert(pCtx->msrEFER == NewEFER);
4890 }
4891
4892 /*
4893 * Inform PGM.
4894 */
4895 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4896 {
4897 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
4898 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
4899 {
4900 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
4901 AssertRCReturn(rc, rc);
4902 /* ignore informational status codes */
4903 }
4904 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
4905 }
4906 else
4907 rcStrict = VINF_SUCCESS;
4908
4909#ifdef IN_RC
4910 /* Return to ring-3 for rescheduling if WP or AM changes. */
4911 if ( rcStrict == VINF_SUCCESS
4912 && ( (uNewCrX & (X86_CR0_WP | X86_CR0_AM))
4913 != (uOldCrX & (X86_CR0_WP | X86_CR0_AM))) )
4914 rcStrict = VINF_EM_RESCHEDULE;
4915#endif
4916 break;
4917 }
4918
4919 /*
4920 * CR2 can be changed without any restrictions.
4921 */
4922 case 2:
4923 pCtx->cr2 = uNewCrX;
4924 rcStrict = VINF_SUCCESS;
4925 break;
4926
4927 /*
4928 * CR3 is relatively simple, although AMD and Intel have different
4929 * accounts of how setting reserved bits is handled. We take intel's
4930 * word for the lower bits and AMD's for the high bits (63:52). The
4931 * lower reserved bits are ignored and left alone; OpenBSD 5.8 relies
4932 * on this.
4933 */
4934 /** @todo Testcase: Setting reserved bits in CR3, especially before
4935 * enabling paging. */
4936 case 3:
4937 {
4938 /* check / mask the value. */
4939 if (uNewCrX & UINT64_C(0xfff0000000000000))
4940 {
4941 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
4942 return iemRaiseGeneralProtectionFault0(pIemCpu);
4943 }
4944
4945 uint64_t fValid;
4946 if ( (pCtx->cr4 & X86_CR4_PAE)
4947 && (pCtx->msrEFER & MSR_K6_EFER_LME))
4948 fValid = UINT64_C(0x000fffffffffffff);
4949 else
4950 fValid = UINT64_C(0xffffffff);
4951 if (uNewCrX & ~fValid)
4952 {
4953 Log(("Automatically clearing reserved MBZ bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
4954 uNewCrX, uNewCrX & ~fValid));
4955 uNewCrX &= fValid;
4956 }
4957
4958 /** @todo If we're in PAE mode we should check the PDPTRs for
4959 * invalid bits. */
4960
4961 /* Make the change. */
4962 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4963 {
4964 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
4965 AssertRCSuccessReturn(rc, rc);
4966 }
4967 else
4968 pCtx->cr3 = uNewCrX;
4969
4970 /* Inform PGM. */
4971 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4972 {
4973 if (pCtx->cr0 & X86_CR0_PG)
4974 {
4975 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4976 AssertRCReturn(rc, rc);
4977 /* ignore informational status codes */
4978 }
4979 }
4980 rcStrict = VINF_SUCCESS;
4981 break;
4982 }
4983
4984 /*
4985 * CR4 is a bit more tedious as there are bits which cannot be cleared
4986 * under some circumstances and such.
4987 */
4988 case 4:
4989 {
4990 uint64_t const uOldCrX = pCtx->cr4;
4991
4992 /** @todo Shouldn't this look at the guest CPUID bits to determine
4993 * valid bits? e.g. if guest CPUID doesn't allow X86_CR4_OSXMMEEXCPT, we
4994 * should #GP(0). */
4995 /* reserved bits */
4996 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
4997 | X86_CR4_TSD | X86_CR4_DE
4998 | X86_CR4_PSE | X86_CR4_PAE
4999 | X86_CR4_MCE | X86_CR4_PGE
5000 | X86_CR4_PCE | X86_CR4_OSFXSR
5001 | X86_CR4_OSXMMEEXCPT;
5002 //if (xxx)
5003 // fValid |= X86_CR4_VMXE;
5004 if (IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fXSaveRstor)
5005 fValid |= X86_CR4_OSXSAVE;
5006 if (uNewCrX & ~(uint64_t)fValid)
5007 {
5008 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
5009 return iemRaiseGeneralProtectionFault0(pIemCpu);
5010 }
5011
5012 /* long mode checks. */
5013 if ( (uOldCrX & X86_CR4_PAE)
5014 && !(uNewCrX & X86_CR4_PAE)
5015 && CPUMIsGuestInLongModeEx(pCtx) )
5016 {
5017 Log(("Trying to clear CR4.PAE while long mode is active\n"));
5018 return iemRaiseGeneralProtectionFault0(pIemCpu);
5019 }
5020
5021
5022 /*
5023 * Change it.
5024 */
5025 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
5026 {
5027 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
5028 AssertRCSuccessReturn(rc, rc);
5029 }
5030 else
5031 pCtx->cr4 = uNewCrX;
5032 Assert(pCtx->cr4 == uNewCrX);
5033
5034 /*
5035 * Notify SELM and PGM.
5036 */
5037 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
5038 {
5039 /* SELM - VME may change things wrt the TSS shadowing. */
5040 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
5041 {
5042 Log(("iemCImpl_load_CrX: VME %d -> %d => Setting VMCPU_FF_SELM_SYNC_TSS\n",
5043 RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) ));
5044#ifdef VBOX_WITH_RAW_MODE
5045 if (!HMIsEnabled(IEMCPU_TO_VM(pIemCpu)))
5046 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
5047#endif
5048 }
5049
5050 /* PGM - flushing and mode. */
5051 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE))
5052 {
5053 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
5054 AssertRCReturn(rc, rc);
5055 /* ignore informational status codes */
5056 }
5057 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
5058 }
5059 else
5060 rcStrict = VINF_SUCCESS;
5061 break;
5062 }
5063
5064 /*
5065 * CR8 maps to the APIC TPR.
5066 */
5067 case 8:
5068 if (uNewCrX & ~(uint64_t)0xf)
5069 {
5070 Log(("Trying to set reserved CR8 bits (%#RX64)\n", uNewCrX));
5071 return iemRaiseGeneralProtectionFault0(pIemCpu);
5072 }
5073
5074 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
5075 PDMApicSetTPR(IEMCPU_TO_VMCPU(pIemCpu), (uint8_t)uNewCrX << 4);
5076 rcStrict = VINF_SUCCESS;
5077 break;
5078
5079 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5080 }
5081
5082 /*
5083 * Advance the RIP on success.
5084 */
5085 if (RT_SUCCESS(rcStrict))
5086 {
5087 if (rcStrict != VINF_SUCCESS)
5088 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
5089 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5090 }
5091
5092 return rcStrict;
5093}
5094
5095
5096/**
5097 * Implements mov CRx,GReg.
5098 *
5099 * @param iCrReg The CRx register to write (valid).
5100 * @param iGReg The general register to load the DRx value from.
5101 */
5102IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
5103{
5104 if (pIemCpu->uCpl != 0)
5105 return iemRaiseGeneralProtectionFault0(pIemCpu);
5106 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
5107
5108 /*
5109 * Read the new value from the source register and call common worker.
5110 */
5111 uint64_t uNewCrX;
5112 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5113 uNewCrX = iemGRegFetchU64(pIemCpu, iGReg);
5114 else
5115 uNewCrX = iemGRegFetchU32(pIemCpu, iGReg);
5116 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
5117}
5118
5119
5120/**
5121 * Implements 'LMSW r/m16'
5122 *
5123 * @param u16NewMsw The new value.
5124 */
5125IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
5126{
5127 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5128
5129 if (pIemCpu->uCpl != 0)
5130 return iemRaiseGeneralProtectionFault0(pIemCpu);
5131 Assert(!pCtx->eflags.Bits.u1VM);
5132
5133 /*
5134 * Compose the new CR0 value and call common worker.
5135 */
5136 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
5137 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
5138 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
5139}
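/* Illustrative note on the composition above: CR0.PE is never masked out of
   the old value, so LMSW can set PE but not clear it; only MP, EM and TS can
   change in both directions. E.g. CR0=0x8000003b with MSW=0x0000 yields
   0x80000031 (PE, ET and NE kept; MP and TS cleared). */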
5140
5141
5142/**
5143 * Implements 'CLTS'.
5144 */
5145IEM_CIMPL_DEF_0(iemCImpl_clts)
5146{
5147 if (pIemCpu->uCpl != 0)
5148 return iemRaiseGeneralProtectionFault0(pIemCpu);
5149
5150 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5151 uint64_t uNewCr0 = pCtx->cr0;
5152 uNewCr0 &= ~X86_CR0_TS;
5153 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
5154}
5155
5156
5157/**
5158 * Implements mov GReg,DRx.
5159 *
5160 * @param iGReg The general register to store the DRx value in.
5161 * @param iDrReg The DRx register to read (0-7).
5162 */
5163IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
5164{
5165 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5166
5167 /*
5168 * Check preconditions.
5169 */
5170
5171 /* Raise GPs. */
5172 if (pIemCpu->uCpl != 0)
5173 return iemRaiseGeneralProtectionFault0(pIemCpu);
5174 Assert(!pCtx->eflags.Bits.u1VM);
5175
5176 if ( (iDrReg == 4 || iDrReg == 5)
5177 && (pCtx->cr4 & X86_CR4_DE) )
5178 {
5179 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
5180 return iemRaiseGeneralProtectionFault0(pIemCpu);
5181 }
5182
5183 /* Raise #DB if general access detect is enabled. */
5184 if (pCtx->dr[7] & X86_DR7_GD)
5185 {
5186 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
5187 return iemRaiseDebugException(pIemCpu);
5188 }
5189
5190 /*
5191 * Read the debug register and store it in the specified general register.
5192 */
5193 uint64_t drX;
5194 switch (iDrReg)
5195 {
5196 case 0: drX = pCtx->dr[0]; break;
5197 case 1: drX = pCtx->dr[1]; break;
5198 case 2: drX = pCtx->dr[2]; break;
5199 case 3: drX = pCtx->dr[3]; break;
5200 case 6:
5201 case 4:
5202 drX = pCtx->dr[6];
5203 drX |= X86_DR6_RA1_MASK;
5204 drX &= ~X86_DR6_RAZ_MASK;
5205 break;
5206 case 7:
5207 case 5:
5208 drX = pCtx->dr[7];
5209 drX |= X86_DR7_RA1_MASK;
5210 drX &= ~X86_DR7_RAZ_MASK;
5211 break;
5212 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5213 }
5214
5215 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5216 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = drX;
5217 else
5218 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)drX;
5219
5220 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5221 return VINF_SUCCESS;
5222}
5223
5224
5225/**
5226 * Implements mov DRx,GReg.
5227 *
5228 * @param iDrReg The DRx register to write (valid).
5229 * @param iGReg The general register to load the DRx value from.
5230 */
5231IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
5232{
5233 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5234
5235 /*
5236 * Check preconditions.
5237 */
5238 if (pIemCpu->uCpl != 0)
5239 return iemRaiseGeneralProtectionFault0(pIemCpu);
5240 Assert(!pCtx->eflags.Bits.u1VM);
5241
5242 if (iDrReg == 4 || iDrReg == 5)
5243 {
5244 if (pCtx->cr4 & X86_CR4_DE)
5245 {
5246 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
5247 return iemRaiseGeneralProtectionFault0(pIemCpu);
5248 }
5249 iDrReg += 2;
5250 }
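    /* Note: with CR4.DE clear, DR4 and DR5 are aliases of DR6 and DR7 (kept
       for compatibility with older CPUs); the iDrReg += 2 above redirects the
       write accordingly. */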
5251
5252 /* Raise #DB if general access detect is enabled. */
5253 /** @todo Is \#DB (DR7.GD) raised before reserved high bits in DR7/DR6
5254 * raise \#GP? */
5255 if (pCtx->dr[7] & X86_DR7_GD)
5256 {
5257 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
5258 return iemRaiseDebugException(pIemCpu);
5259 }
5260
5261 /*
5262 * Read the new value from the source register.
5263 */
5264 uint64_t uNewDrX;
5265 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5266 uNewDrX = iemGRegFetchU64(pIemCpu, iGReg);
5267 else
5268 uNewDrX = iemGRegFetchU32(pIemCpu, iGReg);
5269
5270 /*
5271 * Adjust it.
5272 */
5273 switch (iDrReg)
5274 {
5275 case 0:
5276 case 1:
5277 case 2:
5278 case 3:
5279 /* nothing to adjust */
5280 break;
5281
5282 case 6:
5283 if (uNewDrX & X86_DR6_MBZ_MASK)
5284 {
5285 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
5286 return iemRaiseGeneralProtectionFault0(pIemCpu);
5287 }
5288 uNewDrX |= X86_DR6_RA1_MASK;
5289 uNewDrX &= ~X86_DR6_RAZ_MASK;
5290 break;
5291
5292 case 7:
5293 if (uNewDrX & X86_DR7_MBZ_MASK)
5294 {
5295 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
5296 return iemRaiseGeneralProtectionFault0(pIemCpu);
5297 }
5298 uNewDrX |= X86_DR7_RA1_MASK;
5299 uNewDrX &= ~X86_DR7_RAZ_MASK;
5300 break;
5301
5302 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5303 }
5304
5305 /*
5306 * Do the actual setting.
5307 */
5308 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5309 {
5310 int rc = CPUMSetGuestDRx(IEMCPU_TO_VMCPU(pIemCpu), iDrReg, uNewDrX);
5311 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_IEM_IPE_1 : rc);
5312 }
5313 else
5314 pCtx->dr[iDrReg] = uNewDrX;
5315
5316 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5317 return VINF_SUCCESS;
5318}
5319
5320
5321/**
5322 * Implements 'INVLPG m'.
5323 *
5324 * @param GCPtrPage The effective address of the page to invalidate.
5325 * @remarks Updates the RIP.
5326 */
5327IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
5328{
5329 /* ring-0 only. */
5330 if (pIemCpu->uCpl != 0)
5331 return iemRaiseGeneralProtectionFault0(pIemCpu);
5332 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
5333
5334 int rc = PGMInvalidatePage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPage);
5335 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5336
5337 if (rc == VINF_SUCCESS)
5338 return VINF_SUCCESS;
5339 if (rc == VINF_PGM_SYNC_CR3)
5340 return iemSetPassUpStatus(pIemCpu, rc);
5341
5342 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
5343 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
5344 return rc;
5345}
5346
5347
5348/**
5349 * Implements RDTSC.
5350 */
5351IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
5352{
5353 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5354
5355 /*
5356 * Check preconditions.
5357 */
5358 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fTsc)
5359 return iemRaiseUndefinedOpcode(pIemCpu);
5360
5361 if ( (pCtx->cr4 & X86_CR4_TSD)
5362 && pIemCpu->uCpl != 0)
5363 {
5364 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pIemCpu->uCpl));
5365 return iemRaiseGeneralProtectionFault0(pIemCpu);
5366 }
5367
5368 /*
5369 * Do the job.
5370 */
5371 uint64_t uTicks = TMCpuTickGet(IEMCPU_TO_VMCPU(pIemCpu));
5372 pCtx->rax = (uint32_t)uTicks;
5373 pCtx->rdx = uTicks >> 32;
5374#ifdef IEM_VERIFICATION_MODE_FULL
5375 pIemCpu->fIgnoreRaxRdx = true;
5376#endif
5377
5378 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5379 return VINF_SUCCESS;
5380}
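/* Illustrative note: RDTSC returns the tick count split across EDX:EAX, e.g.
   uTicks=0x0000001234abcdef gives EAX=0x34abcdef and EDX=0x00000012, with the
   upper halves of RAX/RDX cleared by the 32-bit assignments above. */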
5381
5382
5383/**
5384 * Implements RDMSR.
5385 */
5386IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
5387{
5388 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5389
5390 /*
5391 * Check preconditions.
5392 */
5393 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMsr)
5394 return iemRaiseUndefinedOpcode(pIemCpu);
5395 if (pIemCpu->uCpl != 0)
5396 return iemRaiseGeneralProtectionFault0(pIemCpu);
5397
5398 /*
5399 * Do the job.
5400 */
5401 RTUINT64U uValue;
5402 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, &uValue.u);
5403 if (rcStrict == VINF_SUCCESS)
5404 {
5405 pCtx->rax = uValue.s.Lo;
5406 pCtx->rdx = uValue.s.Hi;
5407
5408 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5409 return VINF_SUCCESS;
5410 }
5411
5412#ifndef IN_RING3
5413 /* Deferred to ring-3. */
5414 if (rcStrict == VINF_CPUM_R3_MSR_READ)
5415 {
5416 Log(("IEM: rdmsr(%#x) -> ring-3\n", pCtx->ecx));
5417 return rcStrict;
5418 }
5419#else /* IN_RING3 */
5420 /* Often an unimplemented MSR or MSR bit, so worth logging. */
5421 static uint32_t s_cTimes = 0;
5422 if (s_cTimes++ < 10)
5423 LogRel(("IEM: rdmsr(%#x) -> #GP(0)\n", pCtx->ecx));
5424 else
5425#endif
5426 Log(("IEM: rdmsr(%#x) -> #GP(0)\n", pCtx->ecx));
5427 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
5428 return iemRaiseGeneralProtectionFault0(pIemCpu);
5429}
5430
5431
5432/**
5433 * Implements WRMSR.
5434 */
5435IEM_CIMPL_DEF_0(iemCImpl_wrmsr)
5436{
5437 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5438
5439 /*
5440 * Check preconditions.
5441 */
5442 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMsr)
5443 return iemRaiseUndefinedOpcode(pIemCpu);
5444 if (pIemCpu->uCpl != 0)
5445 return iemRaiseGeneralProtectionFault0(pIemCpu);
5446
5447 /*
5448 * Do the job.
5449 */
5450 RTUINT64U uValue;
5451 uValue.s.Lo = pCtx->eax;
5452 uValue.s.Hi = pCtx->edx;
5453
5454 VBOXSTRICTRC rcStrict;
5455 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5456 rcStrict = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u);
5457 else
5458 {
5459#ifdef IN_RING3
5460 CPUMCTX CtxTmp = *pCtx;
5461 rcStrict = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u);
5462 PCPUMCTX pCtx2 = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
5463 *pCtx = *pCtx2;
5464 *pCtx2 = CtxTmp;
5465#else
5466 AssertReleaseFailedReturn(VERR_IEM_IPE_2);
5467#endif
5468 }
5469 if (rcStrict == VINF_SUCCESS)
5470 {
5471 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5472 return VINF_SUCCESS;
5473 }
5474
5475#ifndef IN_RING3
5476 /* Deferred to ring-3. */
5477 if (rcStrict == VINF_CPUM_R3_MSR_WRITE)
5478 {
5479 Log(("IEM: wrmsr(%#x) -> ring-3\n", pCtx->ecx));
5480 return rcStrict;
5481 }
5482#else /* IN_RING3 */
5483 /* Often an unimplemented MSR or MSR bit, so worth logging. */
5484 static uint32_t s_cTimes = 0;
5485 if (s_cTimes++ < 10)
5486 LogRel(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo));
5487 else
5488#endif
5489 Log(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo));
5490 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
5491 return iemRaiseGeneralProtectionFault0(pIemCpu);
5492}
5493
5494
5495/**
5496 * Implements 'IN eAX, port'.
5497 *
5498 * @param u16Port The source port.
5499 * @param cbReg The register size.
5500 */
5501IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
5502{
5503 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5504
5505 /*
5506 * CPL check
5507 */
5508 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
5509 if (rcStrict != VINF_SUCCESS)
5510 return rcStrict;
5511
5512 /*
5513 * Perform the I/O.
5514 */
5515 uint32_t u32Value;
5516 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5517 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, &u32Value, cbReg);
5518 else
5519 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
5520 if (IOM_SUCCESS(rcStrict))
5521 {
5522 switch (cbReg)
5523 {
5524 case 1: pCtx->al = (uint8_t)u32Value; break;
5525 case 2: pCtx->ax = (uint16_t)u32Value; break;
5526 case 4: pCtx->rax = u32Value; break;
5527 default: AssertFailedReturn(VERR_IEM_IPE_3);
5528 }
5529 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5530 pIemCpu->cPotentialExits++;
5531 if (rcStrict != VINF_SUCCESS)
5532 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
5533 Assert(rcStrict == VINF_SUCCESS); /* assumed below */
5534
5535 /*
5536 * Check for I/O breakpoints.
5537 */
5538 uint32_t const uDr7 = pCtx->dr[7];
5539 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
5540 && X86_DR7_ANY_RW_IO(uDr7)
5541 && (pCtx->cr4 & X86_CR4_DE))
5542 || DBGFBpIsHwIoArmed(IEMCPU_TO_VM(pIemCpu))))
5543 {
5544 rcStrict = DBGFBpCheckIo(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), pCtx, u16Port, cbReg);
5545 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
5546 rcStrict = iemRaiseDebugException(pIemCpu);
5547 }
5548 }
5549
5550 return rcStrict;
5551}
5552
5553
5554/**
5555 * Implements 'IN eAX, DX'.
5556 *
5557 * @param cbReg The register size.
5558 */
5559IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
5560{
5561 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
5562}
5563
5564
5565/**
5566 * Implements 'OUT port, eAX'.
5567 *
5568 * @param u16Port The destination port.
5569 * @param cbReg The register size.
5570 */
5571IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
5572{
5573 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5574
5575 /*
5576 * CPL check
5577 */
5578 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
5579 if (rcStrict != VINF_SUCCESS)
5580 return rcStrict;
5581
5582 /*
5583 * Perform the I/O.
5584 */
5585 uint32_t u32Value;
5586 switch (cbReg)
5587 {
5588 case 1: u32Value = pCtx->al; break;
5589 case 2: u32Value = pCtx->ax; break;
5590 case 4: u32Value = pCtx->eax; break;
5591 default: AssertFailedReturn(VERR_IEM_IPE_4);
5592 }
5593 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5594 rcStrict = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, u32Value, cbReg);
5595 else
5596 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
5597 if (IOM_SUCCESS(rcStrict))
5598 {
5599 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5600 pIemCpu->cPotentialExits++;
5601 if (rcStrict != VINF_SUCCESS)
5602 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
5603 Assert(rcStrict == VINF_SUCCESS); /* assumed below */
5604
5605 /*
5606 * Check for I/O breakpoints.
5607 */
5608 uint32_t const uDr7 = pCtx->dr[7];
5609 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
5610 && X86_DR7_ANY_RW_IO(uDr7)
5611 && (pCtx->cr4 & X86_CR4_DE))
5612 || DBGFBpIsHwIoArmed(IEMCPU_TO_VM(pIemCpu))))
5613 {
5614 rcStrict = DBGFBpCheckIo(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), pCtx, u16Port, cbReg);
5615 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
5616 rcStrict = iemRaiseDebugException(pIemCpu);
5617 }
5618 }
5619 return rcStrict;
5620}
5621
5622
5623/**
5624 * Implements 'OUT DX, eAX'.
5625 *
5626 * @param cbReg The register size.
5627 */
5628IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
5629{
5630 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
5631}
5632
5633
5634/**
5635 * Implements 'CLI'.
5636 */
5637IEM_CIMPL_DEF_0(iemCImpl_cli)
5638{
5639 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5640 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
5641 uint32_t const fEflOld = fEfl;
5642 if (pCtx->cr0 & X86_CR0_PE)
5643 {
5644 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
5645 if (!(fEfl & X86_EFL_VM))
5646 {
5647 if (pIemCpu->uCpl <= uIopl)
5648 fEfl &= ~X86_EFL_IF;
5649 else if ( pIemCpu->uCpl == 3
5650 && (pCtx->cr4 & X86_CR4_PVI) )
5651 fEfl &= ~X86_EFL_VIF;
5652 else
5653 return iemRaiseGeneralProtectionFault0(pIemCpu);
5654 }
5655 /* V8086 */
5656 else if (uIopl == 3)
5657 fEfl &= ~X86_EFL_IF;
5658 else if ( uIopl < 3
5659 && (pCtx->cr4 & X86_CR4_VME) )
5660 fEfl &= ~X86_EFL_VIF;
5661 else
5662 return iemRaiseGeneralProtectionFault0(pIemCpu);
5663 }
5664 /* real mode */
5665 else
5666 fEfl &= ~X86_EFL_IF;
5667
5668 /* Commit. */
5669 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
5670 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5671 Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl)); NOREF(fEflOld);
5672 return VINF_SUCCESS;
5673}
5674
5675
5676/**
5677 * Implements 'STI'.
5678 */
5679IEM_CIMPL_DEF_0(iemCImpl_sti)
5680{
5681 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5682 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
5683 uint32_t const fEflOld = fEfl;
5684
5685 if (pCtx->cr0 & X86_CR0_PE)
5686 {
5687 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
5688 if (!(fEfl & X86_EFL_VM))
5689 {
5690 if (pIemCpu->uCpl <= uIopl)
5691 fEfl |= X86_EFL_IF;
5692 else if ( pIemCpu->uCpl == 3
5693 && (pCtx->cr4 & X86_CR4_PVI)
5694 && !(fEfl & X86_EFL_VIP) )
5695 fEfl |= X86_EFL_VIF;
5696 else
5697 return iemRaiseGeneralProtectionFault0(pIemCpu);
5698 }
5699 /* V8086 */
5700 else if (uIopl == 3)
5701 fEfl |= X86_EFL_IF;
5702 else if ( uIopl < 3
5703 && (pCtx->cr4 & X86_CR4_VME)
5704 && !(fEfl & X86_EFL_VIP) )
5705 fEfl |= X86_EFL_VIF;
5706 else
5707 return iemRaiseGeneralProtectionFault0(pIemCpu);
5708 }
5709 /* real mode */
5710 else
5711 fEfl |= X86_EFL_IF;
5712
5713 /* Commit. */
5714 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
5715 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5716 if ((!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF)) || IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
5717 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
5718 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl));
5719 return VINF_SUCCESS;
5720}
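/* Note: the EMSetInhibitInterruptsPC call above models the one-instruction
   interrupt shadow that STI establishes when it flips IF from 0 to 1, so a
   pending interrupt is not delivered until after the instruction following
   STI. */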
5721
5722
5723/**
5724 * Implements 'HLT'.
5725 */
5726IEM_CIMPL_DEF_0(iemCImpl_hlt)
5727{
5728 if (pIemCpu->uCpl != 0)
5729 return iemRaiseGeneralProtectionFault0(pIemCpu);
5730 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5731 return VINF_EM_HALT;
5732}
5733
5734
5735/**
5736 * Implements 'MONITOR'.
5737 */
5738IEM_CIMPL_DEF_1(iemCImpl_monitor, uint8_t, iEffSeg)
5739{
5740 /*
5741 * Permission checks.
5742 */
5743 if (pIemCpu->uCpl != 0)
5744 {
5745 Log2(("monitor: CPL != 0\n"));
5746 return iemRaiseUndefinedOpcode(pIemCpu); /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. */
5747 }
5748 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMonitorMWait)
5749 {
5750 Log2(("monitor: Not in CPUID\n"));
5751 return iemRaiseUndefinedOpcode(pIemCpu);
5752 }
5753
5754 /*
5755 * Gather the operands and validate them.
5756 */
5757 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5758 RTGCPTR GCPtrMem = pIemCpu->enmCpuMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
5759 uint32_t uEcx = pCtx->ecx;
5760 uint32_t uEdx = pCtx->edx;
5761/** @todo Test whether EAX or ECX is processed first, i.e. do we get \#PF or
5762 * \#GP first. */
5763 if (uEcx != 0)
5764 {
5765 Log2(("monitor rax=%RX64, ecx=%RX32, edx=%RX32; ECX != 0 -> #GP(0)\n", GCPtrMem, uEcx, uEdx)); NOREF(uEdx);
5766 return iemRaiseGeneralProtectionFault0(pIemCpu);
5767 }
5768
5769 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrMem);
5770 if (rcStrict != VINF_SUCCESS)
5771 return rcStrict;
5772
5773 RTGCPHYS GCPhysMem;
5774 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
5775 if (rcStrict != VINF_SUCCESS)
5776 return rcStrict;
5777
5778 /*
5779 * Call EM to prepare the monitor/wait.
5780 */
5781 rcStrict = EMMonitorWaitPrepare(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rax, pCtx->rcx, pCtx->rdx, GCPhysMem);
5782 Assert(rcStrict == VINF_SUCCESS);
5783
5784 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5785 return rcStrict;
5786}
5787
5788
5789/**
5790 * Implements 'MWAIT'.
5791 */
5792IEM_CIMPL_DEF_0(iemCImpl_mwait)
5793{
5794 /*
5795 * Permission checks.
5796 */
5797 if (pIemCpu->uCpl != 0)
5798 {
5799 Log2(("mwait: CPL != 0\n"));
5800 /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. (Remember to check
5801 * EFLAGS.VM then.) */
5802 return iemRaiseUndefinedOpcode(pIemCpu);
5803 }
5804 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMonitorMWait)
5805 {
5806 Log2(("mwait: Not in CPUID\n"));
5807 return iemRaiseUndefinedOpcode(pIemCpu);
5808 }
5809
5810 /*
5811 * Gather the operands and validate them.
5812 */
5813 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5814 uint32_t uEax = pCtx->eax;
5815 uint32_t uEcx = pCtx->ecx;
5816 if (uEcx != 0)
5817 {
5818 /* Only supported extension is break on IRQ when IF=0. */
5819 if (uEcx > 1)
5820 {
5821 Log2(("mwait eax=%RX32, ecx=%RX32; ECX > 1 -> #GP(0)\n", uEax, uEcx));
5822 return iemRaiseGeneralProtectionFault0(pIemCpu);
5823 }
5824 uint32_t fMWaitFeatures = 0;
5825 uint32_t uIgnore = 0;
5826 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 5, 0, &uIgnore, &uIgnore, &fMWaitFeatures, &uIgnore);
5827 if ( (fMWaitFeatures & (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
5828 != (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
5829 {
5830 Log2(("mwait eax=%RX32, ecx=%RX32; break-on-IRQ-IF=0 extension not enabled -> #GP(0)\n", uEax, uEcx));
5831 return iemRaiseGeneralProtectionFault0(pIemCpu);
5832 }
5833 }
5834
5835 /*
5836 * Call EM to prepare the monitor/wait.
5837 */
5838 VBOXSTRICTRC rcStrict = EMMonitorWaitPerform(IEMCPU_TO_VMCPU(pIemCpu), uEax, uEcx);
5839
5840 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5841 return rcStrict;
5842}
5843
5844
5845/**
5846 * Implements 'SWAPGS'.
5847 */
5848IEM_CIMPL_DEF_0(iemCImpl_swapgs)
5849{
5850 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT); /* Caller checks this. */
5851
5852 /*
5853 * Permission checks.
5854 */
5855 if (pIemCpu->uCpl != 0)
5856 {
5857 Log2(("swapgs: CPL != 0\n"));
5858 return iemRaiseUndefinedOpcode(pIemCpu);
5859 }
5860
5861 /*
5862 * Do the job.
5863 */
5864 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5865 uint64_t uOtherGsBase = pCtx->msrKERNELGSBASE;
5866 pCtx->msrKERNELGSBASE = pCtx->gs.u64Base;
5867 pCtx->gs.u64Base = uOtherGsBase;
5868
5869 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5870 return VINF_SUCCESS;
5871}
5872
5873
5874/**
5875 * Implements 'CPUID'.
5876 */
5877IEM_CIMPL_DEF_0(iemCImpl_cpuid)
5878{
5879 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5880
5881 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, pCtx->ecx, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
5882 pCtx->rax &= UINT32_C(0xffffffff);
5883 pCtx->rbx &= UINT32_C(0xffffffff);
5884 pCtx->rcx &= UINT32_C(0xffffffff);
5885 pCtx->rdx &= UINT32_C(0xffffffff);
5886
5887 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5888 return VINF_SUCCESS;
5889}
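/* Note: the UINT32_C(0xffffffff) masking above reflects that CPUID writes the
   32-bit GPRs, which zero-extends into bits 63:32 of RAX/RBX/RCX/RDX in
   64-bit mode. */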
5890
5891
5892/**
5893 * Implements 'AAD'.
5894 *
5895 * @param bImm The immediate operand.
5896 */
5897IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
5898{
5899 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5900
5901 uint16_t const ax = pCtx->ax;
5902 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
5903 pCtx->ax = al;
5904 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
5905 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
5906 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
5907
5908 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5909 return VINF_SUCCESS;
5910}
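/* Worked example for the AAD computation above: AX=0x0407 (AH=4, AL=7) with
   the usual immediate 10 gives AL = 7 + 4*10 = 0x2f and AH = 0, i.e. AX=0x002f. */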
5911
5912
5913/**
5914 * Implements 'AAM'.
5915 *
5916 * @param bImm The immediate operand. Cannot be 0.
5917 */
5918IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
5919{
5920 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5921 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
5922
5923 uint16_t const ax = pCtx->ax;
5924 uint8_t const al = (uint8_t)ax % bImm;
5925 uint8_t const ah = (uint8_t)ax / bImm;
5926 pCtx->ax = (ah << 8) + al;
5927 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
5928 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
5929 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
5930
5931 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5932 return VINF_SUCCESS;
5933}
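/* Worked example for the AAM computation above: AL=0x2f (47) with the usual
   immediate 10 gives AH = 47/10 = 4 and AL = 47%10 = 7, i.e. AX=0x0407. */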
5934
5935
5936/**
5937 * Implements 'DAA'.
5938 */
5939IEM_CIMPL_DEF_0(iemCImpl_daa)
5940{
5941 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5942
5943 uint8_t const al = pCtx->al;
5944 bool const fCarry = pCtx->eflags.Bits.u1CF;
5945
5946 if ( pCtx->eflags.Bits.u1AF
5947 || (al & 0xf) >= 10)
5948 {
5949 pCtx->al = al + 6;
5950 pCtx->eflags.Bits.u1AF = 1;
5951 }
5952 else
5953 pCtx->eflags.Bits.u1AF = 0;
5954
5955 if (al >= 0x9a || fCarry)
5956 {
5957 pCtx->al += 0x60;
5958 pCtx->eflags.Bits.u1CF = 1;
5959 }
5960 else
5961 pCtx->eflags.Bits.u1CF = 0;
5962
5963 iemHlpUpdateArithEFlagsU8(pIemCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
5964 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5965 return VINF_SUCCESS;
5966}
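/* Worked example for DAA: after ADD AL,0x38 with AL=0x29 the CPU has AL=0x61
   and AF=1, so the code above adds 6 giving AL=0x67; the incoming AL (0x61) is
   below 0x9a and CF was clear, so no 0x60 correction is applied and the result
   is 0x67, the packed BCD sum of 29 and 38. */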
5967
5968
5969/**
5970 * Implements 'DAS'.
5971 */
5972IEM_CIMPL_DEF_0(iemCImpl_das)
5973{
5974 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5975
5976 uint8_t const uInputAL = pCtx->al;
5977 bool const fCarry = pCtx->eflags.Bits.u1CF;
5978
5979 if ( pCtx->eflags.Bits.u1AF
5980 || (uInputAL & 0xf) >= 10)
5981 {
5982 pCtx->eflags.Bits.u1AF = 1;
5983 if (uInputAL < 6)
5984 pCtx->eflags.Bits.u1CF = 1;
5985 pCtx->al = uInputAL - 6;
5986 }
5987 else
5988 {
5989 pCtx->eflags.Bits.u1AF = 0;
5990 pCtx->eflags.Bits.u1CF = 0;
5991 }
5992
5993 if (uInputAL >= 0x9a || fCarry)
5994 {
5995 pCtx->al -= 0x60;
5996 pCtx->eflags.Bits.u1CF = 1;
5997 }
5998
5999 iemHlpUpdateArithEFlagsU8(pIemCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6000 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6001 return VINF_SUCCESS;
6002}
6003
6004
6005
6006
6007/*
6008 * Instantiate the various string operation combinations.
6009 */
6010#define OP_SIZE 8
6011#define ADDR_SIZE 16
6012#include "IEMAllCImplStrInstr.cpp.h"
6013#define OP_SIZE 8
6014#define ADDR_SIZE 32
6015#include "IEMAllCImplStrInstr.cpp.h"
6016#define OP_SIZE 8
6017#define ADDR_SIZE 64
6018#include "IEMAllCImplStrInstr.cpp.h"
6019
6020#define OP_SIZE 16
6021#define ADDR_SIZE 16
6022#include "IEMAllCImplStrInstr.cpp.h"
6023#define OP_SIZE 16
6024#define ADDR_SIZE 32
6025#include "IEMAllCImplStrInstr.cpp.h"
6026#define OP_SIZE 16
6027#define ADDR_SIZE 64
6028#include "IEMAllCImplStrInstr.cpp.h"
6029
6030#define OP_SIZE 32
6031#define ADDR_SIZE 16
6032#include "IEMAllCImplStrInstr.cpp.h"
6033#define OP_SIZE 32
6034#define ADDR_SIZE 32
6035#include "IEMAllCImplStrInstr.cpp.h"
6036#define OP_SIZE 32
6037#define ADDR_SIZE 64
6038#include "IEMAllCImplStrInstr.cpp.h"
6039
6040#define OP_SIZE 64
6041#define ADDR_SIZE 32
6042#include "IEMAllCImplStrInstr.cpp.h"
6043#define OP_SIZE 64
6044#define ADDR_SIZE 64
6045#include "IEMAllCImplStrInstr.cpp.h"
6046
6047
6048/**
6049 * Implements 'XGETBV'.
6050 */
6051IEM_CIMPL_DEF_0(iemCImpl_xgetbv)
6052{
6053 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6054 if (pCtx->cr4 & X86_CR4_OSXSAVE)
6055 {
6056 uint32_t uEcx = pCtx->ecx;
6057 switch (uEcx)
6058 {
6059 case 0:
6060 break;
6061
6062 case 1: /** @todo Implement XCR1 support. */
6063 default:
6064 Log(("xgetbv ecx=%RX32 -> #GP(0)\n", uEcx));
6065 return iemRaiseGeneralProtectionFault0(pIemCpu);
6066
6067 }
6068 pCtx->rax = RT_LO_U32(pCtx->aXcr[uEcx]);
6069 pCtx->rdx = RT_HI_U32(pCtx->aXcr[uEcx]);
6070
6071 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6072 return VINF_SUCCESS;
6073 }
6074 Log(("xgetbv CR4.OSXSAVE=0 -> UD\n"));
6075 return iemRaiseUndefinedOpcode(pIemCpu);
6076}
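/* Note: with ECX=0 the code above returns XCR0 split into EDX:EAX, e.g. an
   XCR0 of 0x0000000000000007 (x87, SSE and AVX state enabled) yields EAX=7
   and EDX=0. */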
6077
6078
6079/**
6080 * Implements 'XSETBV'.
6081 */
6082IEM_CIMPL_DEF_0(iemCImpl_xsetbv)
6083{
6084 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6085 if (pCtx->cr4 & X86_CR4_OSXSAVE)
6086 {
6087 if (pIemCpu->uCpl == 0)
6088 {
6089 uint32_t uEcx = pCtx->ecx;
6090 uint64_t uNewValue = RT_MAKE_U64(pCtx->eax, pCtx->edx);
6091 switch (uEcx)
6092 {
6093 case 0:
6094 {
6095 int rc = CPUMSetGuestXcr0(IEMCPU_TO_VMCPU(pIemCpu), uNewValue);
6096 if (rc == VINF_SUCCESS)
6097 break;
6098 Assert(rc == VERR_CPUM_RAISE_GP_0);
6099 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
6100 return iemRaiseGeneralProtectionFault0(pIemCpu);
6101 }
6102
6103 case 1: /** @todo Implement XCR1 support. */
6104 default:
6105 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
6106 return iemRaiseGeneralProtectionFault0(pIemCpu);
6107
6108 }
6109
6110 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6111 return VINF_SUCCESS;
6112 }
6113
6114 Log(("xsetbv cpl=%u -> GP(0)\n", pIemCpu->uCpl));
6115 return iemRaiseGeneralProtectionFault0(pIemCpu);
6116 }
6117 Log(("xsetbv CR4.OSXSAVE=0 -> UD\n"));
6118 return iemRaiseUndefinedOpcode(pIemCpu);
6119}
6120
6121
6122
6123/**
6124 * Implements 'FINIT' and 'FNINIT'.
6125 *
6126 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
6127 * not.
6128 */
6129IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
6130{
6131 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6132
6133 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
6134 return iemRaiseDeviceNotAvailable(pIemCpu);
6135
6136 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
6137 if (fCheckXcpts && TODO )
6138 return iemRaiseMathFault(pIemCpu);
6139 */
6140
6141 PX86XSAVEAREA pXState = pCtx->CTX_SUFF(pXState);
6142 pXState->x87.FCW = 0x37f;
6143 pXState->x87.FSW = 0;
6144 pXState->x87.FTW = 0x00; /* 0 - empty. */
6145 pXState->x87.FPUDP = 0;
6146 pXState->x87.DS = 0; //??
6147 pXState->x87.Rsrvd2= 0;
6148 pXState->x87.FPUIP = 0;
6149 pXState->x87.CS = 0; //??
6150 pXState->x87.Rsrvd1= 0;
6151 pXState->x87.FOP = 0;
6152
6153 iemHlpUsedFpu(pIemCpu);
6154 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6155 return VINF_SUCCESS;
6156}
6157
6158
6159/**
6160 * Implements 'FXSAVE'.
6161 *
6162 * @param iEffSeg The effective segment.
6163 * @param GCPtrEff The address of the image.
6164 * @param enmEffOpSize The operand size (only REX.W really matters).
6165 */
6166IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
6167{
6168 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6169
6170 /*
6171 * Raise exceptions.
6172 */
6173 if (pCtx->cr0 & X86_CR0_EM)
6174 return iemRaiseUndefinedOpcode(pIemCpu);
6175 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
6176 return iemRaiseDeviceNotAvailable(pIemCpu);
6177 if (GCPtrEff & 15)
6178 {
6179 /** @todo CPU/VM detection possible! \#AC might not be signalled for
6180 * all/any misalignment sizes; Intel says it's an implementation detail. */
6181 if ( (pCtx->cr0 & X86_CR0_AM)
6182 && pCtx->eflags.Bits.u1AC
6183 && pIemCpu->uCpl == 3)
6184 return iemRaiseAlignmentCheckException(pIemCpu);
6185 return iemRaiseGeneralProtectionFault0(pIemCpu);
6186 }
6187
6188 /*
6189 * Access the memory.
6190 */
6191 void *pvMem512;
6192 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6193 if (rcStrict != VINF_SUCCESS)
6194 return rcStrict;
6195 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
6196 PCX86FXSTATE pSrc = &pCtx->CTX_SUFF(pXState)->x87;
6197
6198 /*
6199 * Store the registers.
6200 */
6201 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
6202 * specific whether MXCSR and XMM0-XMM7 are saved. */
6203
6204 /* common for all formats */
6205 pDst->FCW = pSrc->FCW;
6206 pDst->FSW = pSrc->FSW;
6207 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
6208 pDst->FOP = pSrc->FOP;
6209 pDst->MXCSR = pSrc->MXCSR;
6210 pDst->MXCSR_MASK = pSrc->MXCSR_MASK;
6211 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
6212 {
6213 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
6214 * them for now... */
6215 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
6216 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
6217 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
6218 pDst->aRegs[i].au32[3] = 0;
6219 }
6220
6221 /* FPU IP, CS, DP and DS. */
6222 pDst->FPUIP = pSrc->FPUIP;
6223 pDst->CS = pSrc->CS;
6224 pDst->FPUDP = pSrc->FPUDP;
6225 pDst->DS = pSrc->DS;
6226 if (enmEffOpSize == IEMMODE_64BIT)
6227 {
6228 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
6229 pDst->Rsrvd1 = pSrc->Rsrvd1;
6230 pDst->Rsrvd2 = pSrc->Rsrvd2;
6231 pDst->au32RsrvdForSoftware[0] = 0;
6232 }
6233 else
6234 {
6235 pDst->Rsrvd1 = 0;
6236 pDst->Rsrvd2 = 0;
6237 pDst->au32RsrvdForSoftware[0] = X86_FXSTATE_RSVD_32BIT_MAGIC;
6238 }
6239
6240 /* XMM registers. */
6241 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
6242 || pIemCpu->enmCpuMode != IEMMODE_64BIT
6243 || pIemCpu->uCpl != 0)
6244 {
6245 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
6246 for (uint32_t i = 0; i < cXmmRegs; i++)
6247 pDst->aXMM[i] = pSrc->aXMM[i];
6248 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
6249 * right? */
6250 }
6251
6252 /*
6253 * Commit the memory.
6254 */
6255 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6256 if (rcStrict != VINF_SUCCESS)
6257 return rcStrict;
6258
6259 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6260 return VINF_SUCCESS;
6261}
6262
6263
6264/**
6265 * Implements 'FXRSTOR'.
6266 *
6267 * @param GCPtrEff The address of the image.
6268 * @param iEffSeg The effective segment register for @a GCPtrEff.
 * @param GCPtrEff The address of the image.
6269 */
6270IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
6271{
6272 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6273
6274 /*
6275 * Raise exceptions.
6276 */
6277 if (pCtx->cr0 & X86_CR0_EM)
6278 return iemRaiseUndefinedOpcode(pIemCpu);
6279 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
6280 return iemRaiseDeviceNotAvailable(pIemCpu);
6281 if (GCPtrEff & 15)
6282 {
6283 /** @todo CPU/VM detection possible! \#AC might not be signalled for
6284 * all/any misalignment sizes; Intel says it's an implementation detail. */
6285 if ( (pCtx->cr0 & X86_CR0_AM)
6286 && pCtx->eflags.Bits.u1AC
6287 && pIemCpu->uCpl == 3)
6288 return iemRaiseAlignmentCheckException(pIemCpu);
6289 return iemRaiseGeneralProtectionFault0(pIemCpu);
6290 }
6291
6292 /*
6293 * Access the memory.
6294 */
6295 void *pvMem512;
6296 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
6297 if (rcStrict != VINF_SUCCESS)
6298 return rcStrict;
6299 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
6300 PX86FXSTATE pDst = &pCtx->CTX_SUFF(pXState)->x87;
6301
6302 /*
6303 * Check the state for stuff which will #GP(0).
6304 */
6305 uint32_t const fMXCSR = pSrc->MXCSR;
6306 uint32_t const fMXCSR_MASK = pDst->MXCSR_MASK ? pDst->MXCSR_MASK : UINT32_C(0xffbf);
6307 if (fMXCSR & ~fMXCSR_MASK)
6308 {
6309 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
6310 return iemRaiseGeneralProtectionFault0(pIemCpu);
6311 }
6312
6313 /*
6314 * Load the registers.
6315 */
6316 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
6317 * specific whether MXCSR and XMM0-XMM7 are restored. */
6318
6319 /* common for all formats */
6320 pDst->FCW = pSrc->FCW;
6321 pDst->FSW = pSrc->FSW;
6322 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
6323 pDst->FOP = pSrc->FOP;
6324 pDst->MXCSR = fMXCSR;
6325 /* (MXCSR_MASK is read-only) */
6326 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
6327 {
6328 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
6329 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
6330 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
6331 pDst->aRegs[i].au32[3] = 0;
6332 }
6333
6334 /* FPU IP, CS, DP and DS. */
6335 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6336 {
6337 pDst->FPUIP = pSrc->FPUIP;
6338 pDst->CS = pSrc->CS;
6339 pDst->Rsrvd1 = pSrc->Rsrvd1;
6340 pDst->FPUDP = pSrc->FPUDP;
6341 pDst->DS = pSrc->DS;
6342 pDst->Rsrvd2 = pSrc->Rsrvd2;
6343 }
6344 else
6345 {
6346 pDst->FPUIP = pSrc->FPUIP;
6347 pDst->CS = pSrc->CS;
6348 pDst->Rsrvd1 = 0;
6349 pDst->FPUDP = pSrc->FPUDP;
6350 pDst->DS = pSrc->DS;
6351 pDst->Rsrvd2 = 0;
6352 }
6353
6354 /* XMM registers. */
6355 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
6356 || pIemCpu->enmCpuMode != IEMMODE_64BIT
6357 || pIemCpu->uCpl != 0)
6358 {
6359 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
6360 for (uint32_t i = 0; i < cXmmRegs; i++)
6361 pDst->aXMM[i] = pSrc->aXMM[i];
6362 }
6363
6364 /*
6365 * Commit the memory.
6366 */
6367 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_R);
6368 if (rcStrict != VINF_SUCCESS)
6369 return rcStrict;
6370
6371 iemHlpUsedFpu(pIemCpu);
6372 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6373 return VINF_SUCCESS;
6374}
6375
6376
6377/**
6378 * Common routine for fnstenv and fnsave.
6379 *
6380 * @param uPtr Where to store the state.
6381 * @param pCtx The CPU context.
6382 */
6383static void iemCImplCommonFpuStoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)
6384{
6385 PCX86FXSTATE pSrcX87 = &pCtx->CTX_SUFF(pXState)->x87;
6386 if (enmEffOpSize == IEMMODE_16BIT)
6387 {
6388 uPtr.pu16[0] = pSrcX87->FCW;
6389 uPtr.pu16[1] = pSrcX87->FSW;
6390 uPtr.pu16[2] = iemFpuCalcFullFtw(pSrcX87);
6391 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6392 {
6393 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
6394 * protected mode or long mode and we save it in real mode? And vice
6395 * versa? And with 32-bit operand size? I think the CPU stores the
6396 * effective address ((CS << 4) + IP) in the offset field and does not
6397 * do any address calculation here. */
6398 uPtr.pu16[3] = (uint16_t)pSrcX87->FPUIP;
6399 uPtr.pu16[4] = ((pSrcX87->FPUIP >> 4) & UINT16_C(0xf000)) | pSrcX87->FOP;
6400 uPtr.pu16[5] = (uint16_t)pSrcX87->FPUDP;
6401 uPtr.pu16[6] = (pSrcX87->FPUDP >> 4) & UINT16_C(0xf000);
6402 }
6403 else
6404 {
6405 uPtr.pu16[3] = pSrcX87->FPUIP;
6406 uPtr.pu16[4] = pSrcX87->CS;
6407 uPtr.pu16[5] = pSrcX87->FPUDP;
6408 uPtr.pu16[6] = pSrcX87->DS;
6409 }
6410 }
6411 else
6412 {
6413 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
6414 uPtr.pu16[0*2] = pSrcX87->FCW;
6415 uPtr.pu16[1*2] = pSrcX87->FSW;
6416 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pSrcX87);
6417 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6418 {
6419 uPtr.pu16[3*2] = (uint16_t)pSrcX87->FPUIP;
6420 uPtr.pu32[4] = ((pSrcX87->FPUIP & UINT32_C(0xffff0000)) >> 4) | pSrcX87->FOP;
6421 uPtr.pu16[5*2] = (uint16_t)pSrcX87->FPUDP;
6422 uPtr.pu32[6] = (pSrcX87->FPUDP & UINT32_C(0xffff0000)) >> 4;
6423 }
6424 else
6425 {
6426 uPtr.pu32[3] = pSrcX87->FPUIP;
6427 uPtr.pu16[4*2] = pSrcX87->CS;
6428 uPtr.pu16[4*2+1]= pSrcX87->FOP;
6429 uPtr.pu32[5] = pSrcX87->FPUDP;
6430 uPtr.pu16[6*2] = pSrcX87->DS;
6431 }
6432 }
6433}
6434
6435
6436/**
6437 * Common routine for fldenv and frstor.
6438 *
6439 * @param uPtr Where to load the state from.
6440 * @param pCtx The CPU context.
6441 */
6442static void iemCImplCommonFpuRestoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)
6443{
6444 PX86FXSTATE pDstX87 = &pCtx->CTX_SUFF(pXState)->x87;
6445 if (enmEffOpSize == IEMMODE_16BIT)
6446 {
6447 pDstX87->FCW = uPtr.pu16[0];
6448 pDstX87->FSW = uPtr.pu16[1];
6449 pDstX87->FTW = uPtr.pu16[2];
6450 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6451 {
6452 pDstX87->FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
6453 pDstX87->FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
6454 pDstX87->FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
6455 pDstX87->CS = 0;
6456 pDstX87->Rsrvd1= 0;
6457 pDstX87->DS = 0;
6458 pDstX87->Rsrvd2= 0;
6459 }
6460 else
6461 {
6462 pDstX87->FPUIP = uPtr.pu16[3];
6463 pDstX87->CS = uPtr.pu16[4];
6464 pDstX87->Rsrvd1= 0;
6465 pDstX87->FPUDP = uPtr.pu16[5];
6466 pDstX87->DS = uPtr.pu16[6];
6467 pDstX87->Rsrvd2= 0;
6468 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
6469 }
6470 }
6471 else
6472 {
6473 pDstX87->FCW = uPtr.pu16[0*2];
6474 pDstX87->FSW = uPtr.pu16[1*2];
6475 pDstX87->FTW = uPtr.pu16[2*2];
6476 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6477 {
6478 pDstX87->FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
6479 pDstX87->FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
6480 pDstX87->FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
6481 pDstX87->CS = 0;
6482 pDstX87->Rsrvd1= 0;
6483 pDstX87->DS = 0;
6484 pDstX87->Rsrvd2= 0;
6485 }
6486 else
6487 {
6488 pDstX87->FPUIP = uPtr.pu32[3];
6489 pDstX87->CS = uPtr.pu16[4*2];
6490 pDstX87->Rsrvd1= 0;
6491 pDstX87->FOP = uPtr.pu16[4*2+1];
6492 pDstX87->FPUDP = uPtr.pu32[5];
6493 pDstX87->DS = uPtr.pu16[6*2];
6494 pDstX87->Rsrvd2= 0;
6495 }
6496 }
6497
6498 /* Make adjustments. */
6499 pDstX87->FTW = iemFpuCompressFtw(pDstX87->FTW);
6500 pDstX87->FCW &= ~X86_FCW_ZERO_MASK;
6501 iemFpuRecalcExceptionStatus(pDstX87);
6502 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
6503 * exceptions are pending after loading the saved state? */
6504}
6505
6506
6507/**
6508 * Implements 'FNSTENV'.
6509 *
6510 * @param enmEffOpSize The operand size (only REX.W really matters).
6511 * @param iEffSeg The effective segment register for @a GCPtrEff.
6512 * @param GCPtrEffDst The address of the image.
6513 */
6514IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
6515{
6516 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6517 RTPTRUNION uPtr;
6518 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
6519 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6520 if (rcStrict != VINF_SUCCESS)
6521 return rcStrict;
6522
6523 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
6524
6525 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6526 if (rcStrict != VINF_SUCCESS)
6527 return rcStrict;
6528
6529 /* Note: C0, C1, C2 and C3 are documented as undefined; we leave them untouched! */
6530 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6531 return VINF_SUCCESS;
6532}
6533
6534
6535/**
6536 * Implements 'FNSAVE'.
6537 *
6538 * @param GCPtrEffDst The address of the image.
6539 * @param enmEffOpSize The operand size.
6540 */
6541IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
6542{
6543 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6544 RTPTRUNION uPtr;
6545 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
6546 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6547 if (rcStrict != VINF_SUCCESS)
6548 return rcStrict;
6549
6550 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6551 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
6552 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
6553 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
6554 {
6555 paRegs[i].au32[0] = pFpuCtx->aRegs[i].au32[0];
6556 paRegs[i].au32[1] = pFpuCtx->aRegs[i].au32[1];
6557 paRegs[i].au16[4] = pFpuCtx->aRegs[i].au16[4];
6558 }
6559
6560 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6561 if (rcStrict != VINF_SUCCESS)
6562 return rcStrict;
6563
6564 /*
6565 * Re-initialize the FPU context.
6566 */
6567 pFpuCtx->FCW = 0x37f;
6568 pFpuCtx->FSW = 0;
6569 pFpuCtx->FTW = 0x00; /* 0 - empty */
6570 pFpuCtx->FPUDP = 0;
6571 pFpuCtx->DS = 0;
6572 pFpuCtx->Rsrvd2= 0;
6573 pFpuCtx->FPUIP = 0;
6574 pFpuCtx->CS = 0;
6575 pFpuCtx->Rsrvd1= 0;
6576 pFpuCtx->FOP = 0;
6577
6578 iemHlpUsedFpu(pIemCpu);
6579 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6580 return VINF_SUCCESS;
6581}
6582
6583
6584
6585/**
6586 * Implements 'FLDENV'.
6587 *
6588 * @param enmEffOpSize The operand size (only REX.W really matters).
6589 * @param iEffSeg The effective segment register for @a GCPtrEff.
6590 * @param GCPtrEffSrc The address of the image.
6591 */
6592IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
6593{
6594 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6595 RTCPTRUNION uPtr;
6596 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
6597 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
6598 if (rcStrict != VINF_SUCCESS)
6599 return rcStrict;
6600
6601 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
6602
6603 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
6604 if (rcStrict != VINF_SUCCESS)
6605 return rcStrict;
6606
6607 iemHlpUsedFpu(pIemCpu);
6608 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6609 return VINF_SUCCESS;
6610}
6611
6612
6613/**
6614 * Implements 'FRSTOR'.
6615 *
6616 * @param GCPtrEffSrc The address of the image.
6617 * @param enmEffOpSize The operand size.
6618 */
6619IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
6620{
6621 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6622 RTCPTRUNION uPtr;
6623 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
6624 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
6625 if (rcStrict != VINF_SUCCESS)
6626 return rcStrict;
6627
6628 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6629 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
6630 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
6631 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
6632 {
6633 pFpuCtx->aRegs[i].au32[0] = paRegs[i].au32[0];
6634 pFpuCtx->aRegs[i].au32[1] = paRegs[i].au32[1];
6635 pFpuCtx->aRegs[i].au32[2] = paRegs[i].au16[4];
6636 pFpuCtx->aRegs[i].au32[3] = 0;
6637 }
6638
6639 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
6640 if (rcStrict != VINF_SUCCESS)
6641 return rcStrict;
6642
6643 iemHlpUsedFpu(pIemCpu);
6644 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6645 return VINF_SUCCESS;
6646}
6647
6648
6649/**
6650 * Implements 'FLDCW'.
6651 *
6652 * @param u16Fcw The new FCW.
6653 */
6654IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
6655{
6656 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6657
6658 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
6659 /** @todo Testcase: Try to see what happens when trying to set undefined bits
6660 * (other than 6 and 7). Currently ignoring them. */
6661 /** @todo Testcase: Test that it raises and lowers the FPU exception bits
6662 * according to FSW. (This is what is currently implemented.) */
6663 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6664 pFpuCtx->FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
6665 iemFpuRecalcExceptionStatus(pFpuCtx);
6666
6667 /* Note: C0, C1, C2 and C3 are documented as undefined; we leave them untouched! */
6668 iemHlpUsedFpu(pIemCpu);
6669 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6670 return VINF_SUCCESS;
6671}
6672
6673
6674
6675/**
6676 * Implements the underflow case of fxch.
6677 *
6678 * @param iStReg The other stack register.
6679 */
6680IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
6681{
6682 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6683
6684 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6685 unsigned const iReg1 = X86_FSW_TOP_GET(pFpuCtx->FSW);
6686 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
6687 Assert(!(RT_BIT(iReg1) & pFpuCtx->FTW) || !(RT_BIT(iReg2) & pFpuCtx->FTW));
6688
6689 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
6690 * registers are read as QNaN and then exchanged. This could be
6691 * wrong... */
6692 if (pFpuCtx->FCW & X86_FCW_IM)
6693 {
6694 if (RT_BIT(iReg1) & pFpuCtx->FTW)
6695 {
6696 if (RT_BIT(iReg2) & pFpuCtx->FTW)
6697 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6698 else
6699 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[iStReg].r80;
6700 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
6701 }
6702 else
6703 {
6704 pFpuCtx->aRegs[iStReg].r80 = pFpuCtx->aRegs[0].r80;
6705 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6706 }
6707 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6708 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
6709 }
6710 else
6711 {
6712 /* raise underflow exception, don't change anything. */
6713 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
6714 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6715 }
6716
6717 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
6718 iemHlpUsedFpu(pIemCpu);
6719 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6720 return VINF_SUCCESS;
6721}
6722
6723
6724/**
6725 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
6726 *
6727 * @param iStReg The other stack register to compare ST0 against.
 * @param pfnAImpl The comparison worker (FCOMI/FUCOMI style variant).
 * @param fPop Whether to pop ST0 after the compare.
6728 */
6729IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
6730{
6731 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6732 Assert(iStReg < 8);
6733
6734 /*
6735 * Raise exceptions.
6736 */
6737 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
6738 return iemRaiseDeviceNotAvailable(pIemCpu);
6739
6740 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6741 uint16_t u16Fsw = pFpuCtx->FSW;
6742 if (u16Fsw & X86_FSW_ES)
6743 return iemRaiseMathFault(pIemCpu);
6744
6745 /*
6746 * Check if any of the register accesses causes #SF + #IA.
6747 */
6748 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
6749 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
6750 if ((pFpuCtx->FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
6751 {
6752 uint32_t u32Eflags = pfnAImpl(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
6753 NOREF(u32Eflags);
6754
6755 pFpuCtx->FSW &= ~X86_FSW_C1;
6756 pFpuCtx->FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
6757 if ( !(u16Fsw & X86_FSW_IE)
6758 || (pFpuCtx->FCW & X86_FCW_IM) )
6759 {
6760 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
6761 pCtx->eflags.u |= pCtx->eflags.u & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
6762 }
6763 }
6764 else if (pFpuCtx->FCW & X86_FCW_IM)
6765 {
6766 /* Masked underflow. */
6767 pFpuCtx->FSW &= ~X86_FSW_C1;
6768 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
6769 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
6770 pCtx->eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
6771 }
6772 else
6773 {
6774 /* Raise underflow - don't touch EFLAGS or TOP. */
6775 pFpuCtx->FSW &= ~X86_FSW_C1;
6776 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6777 fPop = false;
6778 }
6779
6780 /*
6781 * Pop if necessary.
6782 */
6783 if (fPop)
6784 {
6785 pFpuCtx->FTW &= ~RT_BIT(iReg1);
6786 pFpuCtx->FSW &= X86_FSW_TOP_MASK;
6787 pFpuCtx->FSW |= ((iReg1 + 7) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT;
6788 }
6789
6790 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
6791 iemHlpUsedFpu(pIemCpu);
6792 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6793 return VINF_SUCCESS;
6794}
6795
6796/** @} */
6797